Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-24 22:42:53 +00:00)
ACRN: hv: Terminology modification in hv code
Rename sos_vm to service_vm.
Rename sos_vmid to service_vmid.
Rename sos_vm_ptr to service_vm_ptr.
Rename get_sos_vm to get_service_vm.
Rename sos_vm_gpa to service_vm_gpa.
Rename sos_vm_e820 to service_vm_e820.
Rename sos_efi_info to service_vm_efi_info.
Rename sos_vm_config to service_vm_config.
Rename sos_vm_hpa2gpa to service_vm_hpa2gpa.
Rename vdev_in_sos to vdev_in_service_vm.
Rename create_sos_vm_e820 to create_service_vm_e820.
Rename sos_high64_max_ram to service_vm_high64_max_ram.
Rename prepare_sos_vm_memmap to prepare_service_vm_memmap.
Rename post_uos_sworld_memory to post_user_vm_sworld_memory.
Rename hcall_sos_offline_cpu to hcall_service_vm_offline_cpu.
Rename filter_mem_from_sos_e820 to filter_mem_from_service_vm_e820.
Rename create_sos_vm_efi_mmap_desc to create_service_vm_efi_mmap_desc.
Rename HC_SOS_OFFLINE_CPU to HC_SERVICE_VM_OFFLINE_CPU.
Rename SOS to Service VM in comment messages.

Tracked-On: #6744
Signed-off-by: Liu Long <long.liu@linux.intel.com>
Reviewed-by: Geoffroy Van Cutsem <geoffroy.vancutsem@intel.com>
This commit is contained in:
parent 26e507a06e
commit 92b7d6a9a3
@@ -65,7 +65,7 @@ struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
 
         dev_config = &vm_config->pci_devs[vm_config->pci_dev_num];
         if (is_hv_owned_pdev(pdev->bdf)) {
-                /* SOS need to emulate the type1 pdevs owned by HV */
+                /* Service VM needs to emulate the type1 pdevs owned by HV */
                 dev_config->emu_type = PCI_DEV_TYPE_SOSEMUL;
                 if (is_bridge(pdev)) {
                         dev_config->vdev_ops = &vpci_bridge_ops;
@@ -16,7 +16,7 @@
 
 /*
  * e820.c contains the related e820 operations; like HV to get memory info for its MMU setup;
- * and hide HV memory from SOS_VM...
+ * and hide HV memory from Service VM...
  */
 
 static uint32_t hv_e820_entries_nr;
@@ -26,7 +26,7 @@ static struct e820_entry hv_e820[E820_MAX_ENTRIES];
 #define DBG_LEVEL_E820 6U
 
 /*
- * @brief reserve some RAM, hide it from sos_vm, return its start address
+ * @brief reserve some RAM, hide it from Service VM, return its start address
  * @param size_arg Amount of memory to be found and marked reserved
  * @param max_addr Maximum address below which memory is to be identified
 *
@@ -290,7 +290,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
 
 /* add msix entry for a vm, based on msi id (phys_bdf+msix_index)
  * - if the entry not be added by any vm, allocate it
- * - if the entry already be added by sos_vm, then change the owner to current vm
+ * - if the entry already be added by Service VM, then change the owner to current vm
  * - if the entry already be added by other vm, return NULL
  */
 static struct ptirq_remapping_info *add_msix_remapping(struct acrn_vm *vm,
@@ -351,7 +351,7 @@ remove_msix_remapping(const struct acrn_vm *vm, uint16_t phys_bdf, uint32_t entr
 
 /* add intx entry for a vm, based on intx id (phys_pin)
  * - if the entry not be added by any vm, allocate it
- * - if the entry already be added by sos_vm, then change the owner to current vm
+ * - if the entry already be added by Service VM, then change the owner to current vm
  * - if the entry already be added by other vm, return NULL
  */
 static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi,
@@ -398,7 +398,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
 
 
         /*
-         * ptirq entry is either created or transferred from SOS VM to Post-launched VM
+         * ptirq entry is either created or transferred from Service VM to Post-launched VM
         */
 
         if (entry != NULL) {
@@ -781,7 +781,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
 }
 
 /* @pre vm != NULL
- * except sos_vm, Device Model should call this function to pre-hold ptdev intx
+ * except Service VM, Device Model should call this function to pre-hold ptdev intx
  * entries:
  * - the entry is identified by phys_pin:
  *   one entry vs. one phys_pin
@@ -184,8 +184,8 @@ void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
         }
 }
 /*
- * To enable the identical map and support of legacy devices/ACPI method in SOS,
- * ACRN presents the entire host 0-4GB memory region to SOS, except the memory
+ * To enable the identical map and support of legacy devices/ACPI method in Service VM,
+ * ACRN presents the entire host 0-4GB memory region to Service VM, except the memory
  * regions explicitly assigned to pre-launched VMs or HV (DRAM and MMIO). However,
  * virtual e820 only contains the known DRAM regions. For this reason,
  * we can't know if the GPA range is guest valid or not, by checking with
@@ -260,9 +260,9 @@ uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa)
 }
 
 /**
- * @pre: the gpa and hpa are identical mapping in SOS.
+ * @pre: the gpa and hpa are identical mapping in Service VM.
 */
-uint64_t sos_vm_hpa2gpa(uint64_t hpa)
+uint64_t service_vm_hpa2gpa(uint64_t hpa)
 {
         return hpa;
 }
@@ -183,7 +183,7 @@ static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
         guest_wakeup_vec32 = *(vm->pm.sx_state_data->wake_vector_32);
         clac();
 
-        pause_vm(vm); /* pause sos_vm before suspend system */
+        pause_vm(vm); /* pause Service VM before suspend system */
         host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
         resume_vm_from_s3(vm, guest_wakeup_vec32); /* jump back to vm */
         put_vm_lock(vm);
@@ -18,7 +18,7 @@
 #define ENTRY_HPA1_LOW_PART2 5U
 #define ENTRY_HPA1_HI 9U
 
-static struct e820_entry sos_vm_e820[E820_MAX_ENTRIES];
+static struct e820_entry service_vm_e820[E820_MAX_ENTRIES];
 static struct e820_entry pre_vm_e820[PRE_VM_NUM][E820_MAX_ENTRIES];
 
 uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_addr, uint64_t max_addr)
@@ -70,7 +70,7 @@ static void sort_vm_e820(struct acrn_vm *vm)
         }
 }
 
-static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint64_t end_pa)
+static void filter_mem_from_service_vm_e820(struct acrn_vm *vm, uint64_t start_pa, uint64_t end_pa)
 {
         uint32_t i;
         uint64_t entry_start;
@@ -79,7 +79,7 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
         struct e820_entry *entry, new_entry = {0};
 
         for (i = 0U; i < entries_count; i++) {
-                entry = &sos_vm_e820[i];
+                entry = &service_vm_e820[i];
                 entry_start = entry->baseaddr;
                 entry_end = entry->baseaddr + entry->length;
 
@@ -121,7 +121,7 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
                 if (new_entry.length > 0UL) {
                         entries_count++;
                         ASSERT(entries_count <= E820_MAX_ENTRIES, "e820 entry overflow");
-                        entry = &sos_vm_e820[entries_count - 1U];
+                        entry = &service_vm_e820[entries_count - 1U];
                         entry->baseaddr = new_entry.baseaddr;
                         entry->length = new_entry.length;
                         entry->type = new_entry.type;
@@ -131,46 +131,46 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
 }
 
 /**
- * before boot sos_vm(service OS), call it to hide HV and prelaunched VM memory in e820 table from sos_vm
+ * before booting the Service VM, call it to hide HV and pre-launched VM memory in the e820 table from the Service VM
 *
 * @pre vm != NULL
 */
-void create_sos_vm_e820(struct acrn_vm *vm)
+void create_service_vm_e820(struct acrn_vm *vm)
 {
         uint16_t vm_id, i;
         uint64_t hv_start_pa = hva2hpa((void *)(get_hv_image_base()));
         uint64_t hv_end_pa = hv_start_pa + get_hv_ram_size();
         uint32_t entries_count = get_e820_entries_count();
-        struct acrn_vm_config *sos_vm_config = get_vm_config(vm->vm_id);
+        struct acrn_vm_config *service_vm_config = get_vm_config(vm->vm_id);
 
-        (void)memcpy_s((void *)sos_vm_e820, entries_count * sizeof(struct e820_entry),
+        (void)memcpy_s((void *)service_vm_e820, entries_count * sizeof(struct e820_entry),
                         (const void *)get_e820_entry(), entries_count * sizeof(struct e820_entry));
 
         vm->e820_entry_num = entries_count;
-        vm->e820_entries = sos_vm_e820;
+        vm->e820_entries = service_vm_e820;
         /* filter out hv memory from e820 table */
-        filter_mem_from_sos_e820(vm, hv_start_pa, hv_end_pa);
+        filter_mem_from_service_vm_e820(vm, hv_start_pa, hv_end_pa);
 
         /* filter out prelaunched vm memory from e820 table */
         for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
                 struct acrn_vm_config *vm_config = get_vm_config(vm_id);
 
                 if (vm_config->load_order == PRE_LAUNCHED_VM) {
-                        filter_mem_from_sos_e820(vm, vm_config->memory.start_hpa,
+                        filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa,
                                         vm_config->memory.start_hpa + vm_config->memory.size);
 
                         /* if HPA2 is available, filter it out as well */
                         if (vm_config->memory.size_hpa2 != 0UL) {
-                                filter_mem_from_sos_e820(vm, vm_config->memory.start_hpa2,
+                                filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa2,
                                                 vm_config->memory.start_hpa2 + vm_config->memory.size_hpa2);
                         }
                 }
         }
 
         for (i = 0U; i < vm->e820_entry_num; i++) {
-                struct e820_entry *entry = &sos_vm_e820[i];
+                struct e820_entry *entry = &service_vm_e820[i];
                 if ((entry->type == E820_TYPE_RAM)) {
-                        sos_vm_config->memory.size += entry->length;
+                        service_vm_config->memory.size += entry->length;
                 }
         }
         sort_vm_e820(vm);
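Note: the filtering renamed above is easier to follow outside the diff. Below is a self-contained sketch of the core operation — punching a [start_pa, end_pa) hole into a single e820 entry. The struct and helper names are simplified assumptions for illustration, not the commit's code; the real filter_mem_from_service_vm_e820() walks the whole table and appends the split-off tail as a new entry.

    #include <stdint.h>

    struct e820_entry_sketch {
            uint64_t baseaddr;
            uint64_t length;
            uint32_t type;
    };

    /* Punch [start_pa, end_pa) out of *entry. When the hole lies strictly
     * inside the entry, the head is kept in place and the tail is split off
     * into *tail, preserving the original type. */
    static void punch_hole(struct e820_entry_sketch *entry,
                           struct e820_entry_sketch *tail,
                           uint64_t start_pa, uint64_t end_pa)
    {
            uint64_t entry_start = entry->baseaddr;
            uint64_t entry_end = entry->baseaddr + entry->length;

            if ((start_pa > entry_start) && (end_pa < entry_end)) {
                    entry->length = start_pa - entry_start; /* keep the head */
                    tail->baseaddr = end_pa;                /* split off the tail */
                    tail->length = entry_end - end_pa;
                    tail->type = entry->type;
            }
    }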
@@ -38,7 +38,7 @@ static uint64_t sept_page_bitmap[TOTAL_SEPT_4K_PAGES_NUM / 64U];
 
 /*
  * @brief Reserve space for SEPT 4K pages from platform E820 table
- * At moment, we only support nested VMX for SOS VM.
+ * At the moment, we only support nested VMX for the Service VM.
 */
 void reserve_buffer_for_sept_pages(void)
 {
@@ -1889,7 +1889,7 @@ static int32_t inject_msi_for_non_lapic_pt(struct acrn_vm *vm, uint64_t addr, ui
 }
 
 /**
- *@pre Pointer vm shall point to SOS_VM
+ *@pre Pointer vm shall point to Service VM
 */
 static void inject_msi_for_lapic_pt(struct acrn_vm *vm, uint64_t addr, uint64_t data)
 {
@@ -46,15 +46,15 @@
 /* Local variables */
 
 /* pre-assumption: TRUSTY_RAM_SIZE is 2M aligned */
-static struct page post_uos_sworld_memory[MAX_POST_VM_NUM][TRUSTY_RAM_SIZE >> PAGE_SHIFT] __aligned(MEM_2M);
+static struct page post_user_vm_sworld_memory[MAX_POST_VM_NUM][TRUSTY_RAM_SIZE >> PAGE_SHIFT] __aligned(MEM_2M);
 
 static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE);
 
-static struct acrn_vm *sos_vm_ptr = NULL;
+static struct acrn_vm *service_vm_ptr = NULL;
 
 void *get_sworld_memory_base(void)
 {
-        return post_uos_sworld_memory;
+        return post_user_vm_sworld_memory;
 }
 
 uint16_t get_vmid_by_uuid(const uint8_t *uuid)
@@ -229,12 +229,12 @@ struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
         return &vm_array[vm_id];
 }
 
-/* return a pointer to the virtual machine structure of SOS VM */
-struct acrn_vm *get_sos_vm(void)
+/* return a pointer to the virtual machine structure of Service VM */
+struct acrn_vm *get_service_vm(void)
 {
-        ASSERT(sos_vm_ptr != NULL, "sos_vm_ptr is NULL");
+        ASSERT(service_vm_ptr != NULL, "service_vm_ptr is NULL");
 
-        return sos_vm_ptr;
+        return service_vm_ptr;
 }
 
 /**
@@ -319,14 +319,14 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
         }
 }
 
-static void deny_pci_bar_access(struct acrn_vm *sos, const struct pci_pdev *pdev)
+static void deny_pci_bar_access(struct acrn_vm *service_vm, const struct pci_pdev *pdev)
 {
         uint32_t idx, mask;
         struct pci_vbar vbar = {};
         uint64_t base = 0UL, size = 0UL;
         uint64_t *pml4_page;
 
-        pml4_page = (uint64_t *)sos->arch_vm.nworld_eptp;
+        pml4_page = (uint64_t *)service_vm->arch_vm.nworld_eptp;
 
         for ( idx= 0; idx < pdev->nr_bars; idx++) {
                 vbar.bar_type.bits = pdev->bars[idx].phy_bar;
@@ -347,31 +347,31 @@ static void deny_pci_bar_access(struct acrn_vm *sos, const struct pci_pdev *pdev
                         if ((base != 0UL)) {
                                 if (is_pci_io_bar(&vbar)) {
                                         base &= 0xffffU;
-                                        deny_guest_pio_access(sos, base, size);
+                                        deny_guest_pio_access(service_vm, base, size);
                                 } else {
                                         /*for passthru device MMIO BAR base must be 4K aligned. This is the requirement of passthru devices.*/
                                         ASSERT((base & PAGE_MASK) != 0U, "%02x:%02x.%d bar[%d] 0x%lx, is not 4K aligned!",
                                                 pdev->bdf.bits.b, pdev->bdf.bits.d, pdev->bdf.bits.f, idx, base);
                                         size = round_page_up(size);
-                                        ept_del_mr(sos, pml4_page, base, size);
+                                        ept_del_mr(service_vm, pml4_page, base, size);
                                 }
                         }
                 }
         }
 }
 
-static void deny_pdevs(struct acrn_vm *sos, struct acrn_vm_pci_dev_config *pci_devs, uint16_t pci_dev_num)
+static void deny_pdevs(struct acrn_vm *service_vm, struct acrn_vm_pci_dev_config *pci_devs, uint16_t pci_dev_num)
 {
         uint16_t i;
 
         for (i = 0; i < pci_dev_num; i++) {
                 if ( pci_devs[i].pdev != NULL) {
-                        deny_pci_bar_access(sos, pci_devs[i].pdev);
+                        deny_pci_bar_access(service_vm, pci_devs[i].pdev);
                 }
         }
 }
 
-static void deny_hv_owned_devices(struct acrn_vm *sos)
+static void deny_hv_owned_devices(struct acrn_vm *service_vm)
 {
         uint16_t pio_address;
         uint32_t nbytes, i;
@@ -379,11 +379,11 @@ static void deny_hv_owned_devices(struct acrn_vm *sos)
         const struct pci_pdev **hv_owned = get_hv_owned_pdevs();
 
         for (i = 0U; i < get_hv_owned_pdev_num(); i++) {
-                deny_pci_bar_access(sos, hv_owned[i]);
+                deny_pci_bar_access(service_vm, hv_owned[i]);
         }
 
         if (get_pio_dbg_uart_cfg(&pio_address, &nbytes)) {
-                deny_guest_pio_access(sos, pio_address, nbytes);
+                deny_guest_pio_access(service_vm, pio_address, nbytes);
         }
 }
 
@@ -395,12 +395,12 @@ static void deny_hv_owned_devices(struct acrn_vm *sos)
  * @pre vm != NULL
  * @pre is_service_vm(vm) == true
 */
-static void prepare_sos_vm_memmap(struct acrn_vm *vm)
+static void prepare_service_vm_memmap(struct acrn_vm *vm)
 {
         uint16_t vm_id;
         uint32_t i;
         uint64_t hv_hpa;
-        uint64_t sos_high64_max_ram = MEM_4G;
+        uint64_t service_vm_high64_max_ram = MEM_4G;
         struct acrn_vm_config *vm_config;
         uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;
         struct epc_section* epc_secs;
@@ -410,18 +410,18 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
         const struct e820_entry *p_e820 = vm->e820_entries;
         struct pci_mmcfg_region *pci_mmcfg;
 
-        pr_dbg("SOS_VM e820 layout:\n");
+        pr_dbg("Service VM e820 layout:\n");
         for (i = 0U; i < entries_count; i++) {
                 entry = p_e820 + i;
                 pr_dbg("e820 table: %d type: 0x%x", i, entry->type);
                 pr_dbg("BaseAddress: 0x%016lx length: 0x%016lx\n", entry->baseaddr, entry->length);
                 if (entry->type == E820_TYPE_RAM) {
-                        sos_high64_max_ram = max((entry->baseaddr + entry->length), sos_high64_max_ram);
+                        service_vm_high64_max_ram = max((entry->baseaddr + entry->length), service_vm_high64_max_ram);
                 }
         }
 
-        /* create real ept map for [0, sos_high64_max_ram) with UC */
-        ept_add_mr(vm, pml4_page, 0UL, 0UL, sos_high64_max_ram, EPT_RWX | EPT_UNCACHED);
+        /* create real ept map for [0, service_vm_high64_max_ram) with UC */
+        ept_add_mr(vm, pml4_page, 0UL, 0UL, service_vm_high64_max_ram, EPT_RWX | EPT_UNCACHED);
 
         /* update ram entries to WB attr */
         for (i = 0U; i < entries_count; i++) {
@@ -431,9 +431,9 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
                 }
         }
 
-        /* Unmap all platform EPC resource from SOS.
+        /* Unmap all platform EPC resource from Service VM.
          * This part has already been marked as reserved by BIOS in E820
-         * will cause EPT violation if sos accesses EPC resource.
+         * will cause EPT violation if Service VM accesses EPC resource.
         */
         epc_secs = get_phys_epc();
         for (i = 0U; (i < MAX_EPC_SECTIONS) && (epc_secs[i].size != 0UL); i++) {
@@ -441,7 +441,7 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
         }
 
         /* unmap hypervisor itself for safety
-         * will cause EPT violation if sos accesses hv memory
+         * will cause EPT violation if Service VM accesses hv memory
         */
         hv_hpa = hva2hpa((void *)(get_hv_image_base()));
         ept_del_mr(vm, pml4_page, hv_hpa, get_hv_ram_size());
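Note: the three hunks above all serve one policy for the Service VM's EPT — map everything uncached, upgrade RAM to write-back, then punch holes for ranges the Service VM must never touch. A standalone model of that policy is sketched below; it is illustrative only (the real code edits EPT paging structures rather than answering a lookup), and all names are hypothetical.

    #include <stdint.h>

    enum map_attr { MAP_UC, MAP_WB, UNMAPPED };

    /* Model of prepare_service_vm_memmap()'s result for a single RAM range
     * and a single forbidden range (hypervisor image, EPC section, ...). */
    static enum map_attr service_vm_gpa_attr(uint64_t gpa, uint64_t max_ram,
                                             uint64_t ram_lo, uint64_t ram_hi,
                                             uint64_t hole_lo, uint64_t hole_hi)
    {
            if ((gpa >= hole_lo) && (gpa < hole_hi)) {
                    return UNMAPPED;                    /* ept_del_mr() region: access faults */
            }
            if ((gpa >= ram_lo) && (gpa < ram_hi)) {
                    return MAP_WB;                      /* e820 RAM entry upgraded to WB */
            }
            return (gpa < max_ram) ? MAP_UC : UNMAPPED; /* default UC identity map */
    }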
@@ -549,8 +549,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 
         if (is_service_vm(vm)) {
                 /* Only for Service VM */
-                create_sos_vm_e820(vm);
-                prepare_sos_vm_memmap(vm);
+                create_service_vm_e820(vm);
+                prepare_service_vm_memmap(vm);
 
                 status = init_vm_boot_info(vm);
         } else {
@@ -559,11 +559,11 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
                         vm->sworld_control.flag.supported = 1U;
                 }
                 if (vm->sworld_control.flag.supported != 0UL) {
-                        uint16_t sos_vm_id = (get_sos_vm())->vm_id;
-                        uint16_t page_idx = vmid_2_rel_vmid(sos_vm_id, vm_id) - 1U;
+                        uint16_t service_vm_id = (get_service_vm())->vm_id;
+                        uint16_t page_idx = vmid_2_rel_vmid(service_vm_id, vm_id) - 1U;
 
                         ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-                                hva2hpa(post_uos_sworld_memory[page_idx]),
+                                hva2hpa(post_user_vm_sworld_memory[page_idx]),
                                 TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
                 }
                 if (vm_config->name[0] == '\0') {
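Note: the "- 1U" in the page_idx computation is easy to misread. A sketch of the slot math follows, assuming (as ACRN's relative-id convention suggests) that relative vm id 0 denotes the Service VM itself, so post-launched VMs start at relative id 1; this is an inference from the diff, not the commit's code.

    #include <stdint.h>

    /* The first post-launched VM (relative id 1) takes slot 0 of
     * post_user_vm_sworld_memory[]. */
    static uint16_t sworld_page_idx(uint16_t service_vm_id, uint16_t vm_id)
    {
            uint16_t rel_vmid = (uint16_t)(vm_id - service_vm_id); /* vmid_2_rel_vmid() */
            return (uint16_t)(rel_vmid - 1U);
    }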
@@ -654,7 +654,7 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 
         if (status == 0) {
                 /* We have assumptions:
-                 * 1) vcpus used by SOS has been offlined by DM before UOS re-use it.
+                 * 1) vcpus used by the Service VM have been offlined by the DM before the User VM re-uses them.
                  * 2) pcpu_bitmap passed sanitization is OK for vcpu creating.
                 */
                 vm->hw.cpu_affinity = pcpu_bitmap;
@@ -907,7 +907,7 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
 
         reset_vcpu(bsp, POWER_ON_RESET);
 
-        /* When SOS resume from S3, it will return to real mode
+        /* When the Service VM resumes from S3, it will return to real mode
          * with entry set to wakeup_vec.
         */
         set_vcpu_startup_entry(bsp, wakeup_vec);
@@ -930,7 +930,7 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
 #ifdef CONFIG_SECURITY_VM_FIXUP
         security_vm_fixup(vm_id);
 #endif
-        /* SOS and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
+        /* Service VM and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
         err = create_vm(vm_id, vm_config->cpu_affinity, vm_config, &vm);
 
         if (err == 0) {
@@ -940,8 +940,8 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
 
                 if (is_service_vm(vm)) {
                         /* We need to ensure all modules of pre-launched VMs have been loaded already
-                         * before loading SOS VM modules, otherwise the module of pre-launched VMs could
-                         * be corrupted because SOS VM kernel might pick any usable RAM to extract kernel
+                         * before loading Service VM modules, otherwise the module of pre-launched VMs could
+                         * be corrupted because the Service VM kernel might pick any usable RAM to extract kernel
                          * when KASLR enabled.
                          * In case the pre-launched VMs aren't loaded successfully, which would cause a deadlock here,
                          * use a 10000ms timer to break the waiting loop.
@@ -990,7 +990,7 @@ void launch_vms(uint16_t pcpu_id)
                 if ((vm_config->load_order == SOS_VM) || (vm_config->load_order == PRE_LAUNCHED_VM)) {
                         if (pcpu_id == get_configured_bsp_pcpu_id(vm_config)) {
                                 if (vm_config->load_order == SOS_VM) {
-                                        sos_vm_ptr = &vm_array[vm_id];
+                                        service_vm_ptr = &vm_array[vm_id];
                                 }
                                 prepare_vm(vm_id, vm_config);
                         }
@@ -28,7 +28,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
                 io_req->reqs.pio_request.size = 2UL;
                 io_req->reqs.pio_request.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));
 
-                /* Inject pm1a S5 request to SOS to shut down the guest */
+                /* Inject pm1a S5 request to Service VM to shut down the guest */
                 (void)emulate_io(vcpu, io_req);
         } else {
                 if (is_service_vm(vm)) {
@@ -47,7 +47,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
                         }
                 }
 
-                /* Either SOS or pre-launched VMs */
+                /* Either Service VM or pre-launched VMs */
                 get_vm_lock(vm);
                 poweroff_if_rt_vm(vm);
                 pause_vm(vm);
@@ -101,7 +101,7 @@ static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset)
                         ret = false;
                 } else {
                         /*
-                         * If it's SOS reset while RTVM is still alive
+                         * If it's Service VM reset while RTVM is still alive
                          * or pre-launched VM reset,
                          * ACRN doesn't support re-launch, just shutdown the guest.
                         */
@@ -116,7 +116,7 @@ static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset)
                         ret = false;
                 }
                 /*
-                 * Ignore writes from SOS and pre-launched VM.
+                 * Ignore writes from Service VM and pre-launched VM.
                  * Equivalent to hiding this port from the guest.
                 */
         }
@@ -198,7 +198,7 @@ static bool handle_reset_reg_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
 */
 void register_reset_port_handler(struct acrn_vm *vm)
 {
-        /* Don't support SOS and pre-launched VM re-launch for now. */
+        /* Don't support Service VM and pre-launched VM re-launch for now. */
         if (!is_postlaunched_vm(vm) || is_rt_vm(vm)) {
                 struct acpi_reset_reg *reset_reg = get_host_reset_reg_data();
                 struct acrn_acpi_generic_address *gas = &(reset_reg->reg);
@@ -215,7 +215,7 @@ void register_reset_port_handler(struct acrn_vm *vm)
                 register_pio_emulation_handler(vm, CF9_PIO_IDX, &io_range, handle_reset_reg_read, handle_cf9_write);
 
                 /*
-                 * - here is taking care of SOS only:
+                 * - here is taking care of Service VM only:
                  *   Don't support MMIO or PCI based reset register for now.
                  *   ACPI Spec: Register_Bit_Width must be 8 and Register_Bit_Offset must be 0.
                 */
@@ -20,7 +20,8 @@ struct hc_dispatch {
 
         /* The permission_flags is a bitmap of guest flags indicating whether a VM can invoke this hypercall:
          *
-         * - If permission_flags == 0UL (which is the default value), this hypercall can only be invoked by the SOS.
+         * - If permission_flags == 0UL (which is the default value), this hypercall can only be invoked by the
+         *   Service VM.
          * - Otherwise, this hypercall can only be invoked by a VM whose guest flags have ALL set bits in
          *   permission_flags.
         */
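Note: the permission rule in the reflowed comment above boils down to a two-case check. A minimal sketch of that rule follows (hypothetical helper, not the commit's code; guest_flags stands for the VM's configured flag bitmap):

    #include <stdint.h>
    #include <stdbool.h>

    /* permission_flags == 0: Service VM only; otherwise the caller's guest
     * flags must contain ALL bits set in permission_flags. */
    static bool hypercall_permitted(uint64_t permission_flags,
                                    uint64_t guest_flags, bool is_service_vm)
    {
            if (permission_flags == 0UL) {
                    return is_service_vm;
            }
            return (guest_flags & permission_flags) == permission_flags;
    }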
@@ -31,8 +32,8 @@ struct hc_dispatch {
 static const struct hc_dispatch hc_dispatch_table[] = {
         [HC_IDX(HC_GET_API_VERSION)] = {
                 .handler = hcall_get_api_version},
-        [HC_IDX(HC_SOS_OFFLINE_CPU)] = {
-                .handler = hcall_sos_offline_cpu},
+        [HC_IDX(HC_SERVICE_VM_OFFLINE_CPU)] = {
+                .handler = hcall_service_vm_offline_cpu},
         [HC_IDX(HC_SET_CALLBACK_VECTOR)] = {
                 .handler = hcall_set_callback_vector},
         [HC_IDX(HC_GET_PLATFORM_INFO)] = {
@@ -106,7 +107,7 @@ static const struct hc_dispatch hc_dispatch_table[] = {
 
 #define GUEST_FLAGS_ALLOWING_HYPERCALLS GUEST_FLAG_SECURE_WORLD_ENABLED
 
-struct acrn_vm *parse_target_vm(struct acrn_vm *sos_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2)
+struct acrn_vm *parse_target_vm(struct acrn_vm *service_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2)
 {
         struct acrn_vm *target_vm = NULL;
         uint16_t vm_id = ACRN_INVALID_VMID;
@@ -116,34 +117,34 @@ struct acrn_vm *parse_target_vm(struct acrn_vm *sos_vm, uint64_t hcall_id, uint6
 
         switch (hcall_id) {
         case HC_CREATE_VM:
-                if (copy_from_gpa(sos_vm, &cv, param1, sizeof(cv)) == 0) {
+                if (copy_from_gpa(service_vm, &cv, param1, sizeof(cv)) == 0) {
                         vm_id = get_vmid_by_uuid(&cv.uuid[0]);
                 }
                 break;
 
         case HC_PM_GET_CPU_STATE:
-                vm_id = rel_vmid_2_vmid(sos_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT));
+                vm_id = rel_vmid_2_vmid(service_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT));
                 break;
 
         case HC_VM_SET_MEMORY_REGIONS:
-                if (copy_from_gpa(sos_vm, &regions, param1, sizeof(regions)) == 0) {
+                if (copy_from_gpa(service_vm, &regions, param1, sizeof(regions)) == 0) {
                         /* the vmid in regions is a relative vm id, need to convert to absolute vm id */
-                        vm_id = rel_vmid_2_vmid(sos_vm->vm_id, regions.vmid);
+                        vm_id = rel_vmid_2_vmid(service_vm->vm_id, regions.vmid);
                 }
                 break;
         case HC_GET_API_VERSION:
-        case HC_SOS_OFFLINE_CPU:
+        case HC_SERVICE_VM_OFFLINE_CPU:
         case HC_SET_CALLBACK_VECTOR:
         case HC_GET_PLATFORM_INFO:
         case HC_SETUP_SBUF:
         case HC_SETUP_HV_NPK_LOG:
         case HC_PROFILING_OPS:
        case HC_GET_HW_INFO:
-                target_vm = sos_vm;
+                target_vm = service_vm;
                 break;
         default:
                 relative_vm_id = (uint16_t)param1;
-                vm_id = rel_vmid_2_vmid(sos_vm->vm_id, relative_vm_id);
+                vm_id = rel_vmid_2_vmid(service_vm->vm_id, relative_vm_id);
                 break;
         }
 
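Note: parse_target_vm() leans heavily on the relative/absolute vm-id conversion, which is just an offset from the Service VM's own id. A sketch of the pair follows, assuming ACRN's convention; validity/range checks are omitted, so treat it as illustration rather than the hypervisor's exact implementation:

    #include <stdint.h>

    /* Post-launched VM ids are exposed to the Service VM relative to its own id. */
    static uint16_t rel_vmid_2_vmid(uint16_t service_vmid, uint16_t rel_vmid)
    {
            return (uint16_t)(service_vmid + rel_vmid);
    }

    static uint16_t vmid_2_rel_vmid(uint16_t service_vmid, uint16_t vmid)
    {
            return (uint16_t)(vmid - service_vmid);
    }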
@@ -173,7 +174,7 @@ static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
         uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI); /* hypercall param2 from guest */
 
         if ((permission_flags == 0UL) && is_service_vm(vm)) {
-                /* A permission_flags of 0 indicates that this hypercall is for SOS to manage
+                /* A permission_flags of 0 indicates that this hypercall is for Service VM to manage
                  * post-launched VMs.
                 */
                 struct acrn_vm *target_vm = parse_target_vm(vm, hcall_id, param1, param2);
@@ -198,7 +199,7 @@ static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
         }
 
         /*
-         * Pass return value to SOS by register rax.
+         * Pass return value to Service VM by register rax.
          * This function should always return 0 since we shouldn't
          * deal with hypercall error in hypervisor.
         */
@@ -213,7 +214,7 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
         /*
          * The following permission checks are applied to hypercalls.
          *
-         * 1. Only SOS and VMs with specific guest flags (referred to as 'allowed VMs' hereinafter) can invoke
+         * 1. Only Service VM and VMs with specific guest flags (referred to as 'allowed VMs' hereinafter) can invoke
          *    hypercalls by executing the `vmcall` instruction. Attempts to execute the `vmcall` instruction in the
          *    other VMs will trigger #UD.
          * 2. Attempts to execute the `vmcall` instruction from ring 1, 2 or 3 in an allowed VM will trigger #GP(0).
@@ -976,7 +976,7 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
         }
         case MSR_IA32_BIOS_UPDT_TRIG:
         {
-                /* We only allow SOS to do uCode update */
+                /* We only allow Service VM to do uCode update */
                 if (is_service_vm(vcpu->vm)) {
                         acrn_update_ucode(vcpu, v);
                 }
@@ -115,7 +115,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
         for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
                 if (cap.bits.fix != 0U) {
                         /*
-                         * The system firmware runs in VMX non-root mode on SOS_VM.
+                         * The system firmware runs in VMX non-root mode on Service VM.
                          * In some cases, the firmware needs particular mem type
                          * at certain memory locations (e.g. UC for some
                          * hardware registers), so we need to configure EPT
@@ -124,7 +124,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
                         vmtrr->fixed_range[i].value = msr_read(fixed_mtrr_map[i].msr);
                 } else {
                         /*
-                         * For non-sos_vm EPT, all memory is setup with WB type in
+                         * For non-Service VM EPT, all memory is setup with WB type in
                          * EPT, so we setup fixed range MTRRs accordingly.
                         */
                         vmtrr->fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;
@@ -21,14 +21,14 @@
 void arch_fire_hsm_interrupt(void)
 {
         /*
-         * use vLAPIC to inject vector to SOS vcpu 0 if vlapic is enabled
+         * use vLAPIC to inject vector to Service VM vcpu 0 if vlapic is enabled
          * otherwise, send IPI hardcoded to BSP_CPU_ID
         */
-        struct acrn_vm *sos_vm;
+        struct acrn_vm *service_vm;
         struct acrn_vcpu *vcpu;
 
-        sos_vm = get_sos_vm();
-        vcpu = vcpu_from_vid(sos_vm, BSP_CPU_ID);
+        service_vm = get_service_vm();
+        vcpu = vcpu_from_vid(service_vm, BSP_CPU_ID);
 
         vlapic_set_intr(vcpu, get_hsm_notification_vector(), LAPIC_TRIG_EDGE);
 }
@@ -89,7 +89,7 @@ static int32_t request_notification_irq(irq_action_t func, void *data)
 */
 void setup_notification(void)
 {
-        /* support IPI notification, SOS_VM will register all CPU */
+        /* support IPI notification, Service VM will register all CPU */
         if (request_notification_irq(kick_notification, NULL) < 0) {
                 pr_err("Failed to setup notification");
         }
@@ -98,15 +98,15 @@ void fill_seed_arg(char *cmd_dst, size_t cmd_sz)
         for (i = 0U; seed_arg[i].str != NULL; i++) {
                 if (seed_arg[i].addr != 0UL) {
 
-                        snprintf(cmd_dst, cmd_sz, "%s0x%X ", seed_arg[i].str, sos_vm_hpa2gpa(seed_arg[i].addr));
+                        snprintf(cmd_dst, cmd_sz, "%s0x%X ", seed_arg[i].str, service_vm_hpa2gpa(seed_arg[i].addr));
 
                         if (seed_arg[i].bootloader_id == BOOTLOADER_SBL) {
                                 struct image_boot_params *boot_params =
                                         (struct image_boot_params *)hpa2hva(seed_arg[i].addr);
 
-                                boot_params->p_seed_list = sos_vm_hpa2gpa(boot_params->p_seed_list);
+                                boot_params->p_seed_list = service_vm_hpa2gpa(boot_params->p_seed_list);
 
-                                boot_params->p_platform_info = sos_vm_hpa2gpa(boot_params->p_platform_info);
+                                boot_params->p_platform_info = service_vm_hpa2gpa(boot_params->p_platform_info);
                         }
 
                         break;
@@ -68,7 +68,7 @@ bool parse_seed_abl(uint64_t addr, struct physical_seed *phy_seed)
 
         /*
          * Copy out abl_seed for trusty and clear the original seed in memory.
-         * The SOS requires the legacy seed to derive RPMB key. So skip the
+         * The Service VM requires the legacy seed to derive RPMB key. So skip the
          * legacy seed when clear original seed.
         */
         (void)memset((void *)&phy_seed->seed_list[0U], 0U, sizeof(phy_seed->seed_list));
@@ -72,14 +72,14 @@ static void *get_initrd_load_addr(struct acrn_vm *vm, uint64_t kernel_start)
         uint64_t mods_start, mods_end;
 
         get_boot_mods_range(&mods_start, &mods_end);
-        mods_start = sos_vm_hpa2gpa(mods_start);
-        mods_end = sos_vm_hpa2gpa(mods_end);
+        mods_start = service_vm_hpa2gpa(mods_start);
+        mods_end = service_vm_hpa2gpa(mods_end);
 
         if (vm->sw.ramdisk_info.src_addr != NULL) {
-                ramdisk_load_gpa = sos_vm_hpa2gpa((uint64_t)vm->sw.ramdisk_info.src_addr);
+                ramdisk_load_gpa = service_vm_hpa2gpa((uint64_t)vm->sw.ramdisk_info.src_addr);
         }
 
-        /* For SOS VM, the ramdisk has been loaded by bootloader, so in most cases
+        /* For Service VM, the ramdisk has been loaded by bootloader, so in most cases
          * there is no need to do gpa copy again. But in the case that the ramdisk is
          * loaded by bootloader at an address higher than its limit, we should do gpa
          * copy then.
@@ -150,8 +150,8 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
         uint32_t kernel_size = kernel_init_size + kernel_align;
 
         get_boot_mods_range(&mods_start, &mods_end);
-        mods_start = sos_vm_hpa2gpa(mods_start);
-        mods_end = sos_vm_hpa2gpa(mods_end);
+        mods_start = service_vm_hpa2gpa(mods_start);
+        mods_end = service_vm_hpa2gpa(mods_end);
 
         /* TODO: support load kernel when modules are beyond 4GB space. */
         if (mods_end < MEM_4G) {
@@ -168,7 +168,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
         } else {
                 load_addr = (void *)zeropage->hdr.pref_addr;
                 if (is_service_vm(vm)) {
-                        /* The non-relocatable SOS kernel might overlap with boot modules. */
+                        /* The non-relocatable Service VM kernel might overlap with boot modules. */
                         pr_err("Non-relocatable kernel found, risk to boot!");
                 }
         }
@@ -185,13 +185,13 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
 /**
  * @pre vm != NULL && efi_mmap_desc != NULL
 */
-static uint16_t create_sos_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_memory_desc *efi_mmap_desc)
+static uint16_t create_service_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_memory_desc *efi_mmap_desc)
 {
         uint16_t i, desc_idx = 0U;
         const struct efi_memory_desc *hv_efi_mmap_desc = get_efi_mmap_entry();
 
         for (i = 0U; i < get_efi_mmap_entries_count(); i++) {
-                /* Below efi mmap desc types in native should be kept as original for SOS VM */
+                /* Below efi mmap desc types in native should be kept as original for Service VM */
                 if ((hv_efi_mmap_desc[i].type == EFI_RESERVED_MEMORYTYPE)
                                 || (hv_efi_mmap_desc[i].type == EFI_UNUSABLE_MEMORY)
                                 || (hv_efi_mmap_desc[i].type == EFI_ACPI_RECLAIM_MEMORY)
@@ -212,9 +212,9 @@ static uint16_t create_sos_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_memor
 
         for (i = 0U; i < vm->e820_entry_num; i++) {
                 /* The memory region with e820 type of RAM could be acted as EFI_CONVENTIONAL_MEMORY
-                 * for SOS VM, the region which occupied by HV and pre-launched VM has been filtered
-                 * already, so it is safe for SOS VM.
-                 * As SOS VM start to run after efi call ExitBootService(), the type of EFI_LOADER_CODE
+                 * for the Service VM; the regions occupied by the HV and pre-launched VMs have been filtered
+                 * already, so it is safe for the Service VM.
+                 * As the Service VM starts to run after the EFI call ExitBootService(), the type of EFI_LOADER_CODE
                  * and EFI_LOADER_DATA which have been mapped to E820_TYPE_RAM are not needed.
                 */
                 if (vm->e820_entries[i].type == E820_TYPE_RAM) {
@@ -228,7 +228,7 @@ static uint16_t create_sos_vm_efi_mmap_desc(struct acrn_vm *vm, struct efi_memor
         }
 
         for (i = 0U; i < desc_idx; i++) {
-                pr_dbg("SOS VM efi mmap desc[%d]: addr: 0x%lx, len: 0x%lx, type: %d", i,
+                pr_dbg("Service VM efi mmap desc[%d]: addr: 0x%lx, len: 0x%lx, type: %d", i,
                         efi_mmap_desc[i].phys_addr, efi_mmap_desc[i].num_pages * PAGE_SIZE, efi_mmap_desc[i].type);
         }
 
||||||
@ -279,19 +279,19 @@ static uint64_t create_zero_page(struct acrn_vm *vm, uint64_t load_params_gpa)
|
|||||||
struct acrn_boot_info *abi = get_acrn_boot_info();
|
struct acrn_boot_info *abi = get_acrn_boot_info();
|
||||||
|
|
||||||
if (boot_from_uefi(abi)) {
|
if (boot_from_uefi(abi)) {
|
||||||
struct efi_info *sos_efi_info = &zeropage->boot_efi_info;
|
struct efi_info *service_vm_efi_info = &zeropage->boot_efi_info;
|
||||||
uint64_t efi_mmap_gpa = BZIMG_EFIMMAP_GPA(load_params_gpa);
|
uint64_t efi_mmap_gpa = BZIMG_EFIMMAP_GPA(load_params_gpa);
|
||||||
struct efi_memory_desc *efi_mmap_desc = (struct efi_memory_desc *)gpa2hva(vm, efi_mmap_gpa);
|
struct efi_memory_desc *efi_mmap_desc = (struct efi_memory_desc *)gpa2hva(vm, efi_mmap_gpa);
|
||||||
uint16_t efi_mmap_desc_nr = create_sos_vm_efi_mmap_desc(vm, efi_mmap_desc);
|
uint16_t efi_mmap_desc_nr = create_service_vm_efi_mmap_desc(vm, efi_mmap_desc);
|
||||||
|
|
||||||
sos_efi_info->loader_signature = 0x34364c45; /* "EL64" */
|
service_vm_efi_info->loader_signature = 0x34364c45; /* "EL64" */
|
||||||
sos_efi_info->memdesc_version = abi->uefi_info.memdesc_version;
|
service_vm_efi_info->memdesc_version = abi->uefi_info.memdesc_version;
|
||||||
sos_efi_info->memdesc_size = sizeof(struct efi_memory_desc);
|
service_vm_efi_info->memdesc_size = sizeof(struct efi_memory_desc);
|
||||||
sos_efi_info->memmap_size = efi_mmap_desc_nr * sizeof(struct efi_memory_desc);
|
service_vm_efi_info->memmap_size = efi_mmap_desc_nr * sizeof(struct efi_memory_desc);
|
||||||
sos_efi_info->memmap = (uint32_t)efi_mmap_gpa;
|
service_vm_efi_info->memmap = (uint32_t)efi_mmap_gpa;
|
||||||
sos_efi_info->memmap_hi = (uint32_t)(efi_mmap_gpa >> 32U);
|
service_vm_efi_info->memmap_hi = (uint32_t)(efi_mmap_gpa >> 32U);
|
||||||
sos_efi_info->systab = abi->uefi_info.systab;
|
service_vm_efi_info->systab = abi->uefi_info.systab;
|
||||||
sos_efi_info->systab_hi = abi->uefi_info.systab_hi;
|
service_vm_efi_info->systab_hi = abi->uefi_info.systab_hi;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
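Note: the memmap/memmap_hi pair above exists because the zeropage's EFI info fields are 32 bits wide, so the 64-bit memory-map GPA is carried in two halves. A self-contained sketch of the split (simplified stand-in struct, not the kernel's actual efi_info layout):

    #include <stdint.h>

    struct efi_info_sketch {
            uint32_t memmap;
            uint32_t memmap_hi;
    };

    static void set_memmap(struct efi_info_sketch *info, uint64_t gpa)
    {
            info->memmap = (uint32_t)gpa;             /* low 32 bits */
            info->memmap_hi = (uint32_t)(gpa >> 32U); /* high 32 bits */
    }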
@@ -71,7 +71,7 @@ static int32_t init_vm_kernel_info(struct acrn_vm *vm, const struct abi_module *
         return ret;
 }
 
-/* cmdline parsed from abi module string, for pre-launched VMs and SOS VM only. */
+/* cmdline parsed from abi module string, for pre-launched VMs and Service VM only. */
 static char mod_cmdline[PRE_VM_NUM + SOS_VM_NUM][MAX_BOOTARGS_SIZE] = { '\0' };
 
 /**
@@ -94,26 +94,26 @@ static void init_vm_bootargs_info(struct acrn_vm *vm, const struct acrn_boot_inf
                 char seed_args[MAX_SEED_ARG_SIZE] = "";
 
                 fill_seed_arg(seed_args, MAX_SEED_ARG_SIZE);
-                /* Fill seed argument for SOS
+                /* Fill seed argument for Service VM
                  * seed_args string ends with a white space and '\0', so no additional delimiter is needed
                 */
                 if (strncat_s((char *)vm->sw.bootargs_info.src_addr, MAX_BOOTARGS_SIZE,
                                 seed_args, (MAX_BOOTARGS_SIZE - 1U)) != 0) {
-                        pr_err("failed to fill seed arg to SOS bootargs!");
+                        pr_err("failed to fill seed arg to Service VM bootargs!");
                 }
 
-                /* If there is cmdline from abi->cmdline, merge it with configured SOS bootargs.
+                /* If there is cmdline from abi->cmdline, merge it with configured Service VM bootargs.
                  * This is very helpful when one of configured bootargs need to be revised at GRUB runtime
                  * (e.g. "root="), since the later one would override the previous one if multiple bootargs exist.
                 */
                 if (abi->cmdline[0] != '\0') {
                         if (strncat_s((char *)vm->sw.bootargs_info.src_addr, MAX_BOOTARGS_SIZE,
                                         abi->cmdline, (MAX_BOOTARGS_SIZE - 1U)) != 0) {
-                                pr_err("failed to merge mbi cmdline to SOS bootargs!");
+                                pr_err("failed to merge mbi cmdline to Service VM bootargs!");
                         }
                 }
         } else {
-                pr_err("no space to append SOS bootargs!");
+                pr_err("no space to append Service VM bootargs!");
         }
 
 }
@@ -19,7 +19,7 @@
 #define MAX_PROTOCOL_NAME_SIZE 16U
 #define MAX_MOD_STRING_SIZE 2048U
 
-/* The modules in multiboot are: Pre-launched VM: kernel/ramdisk/acpi; SOS VM: kernel/ramdisk */
+/* The modules in multiboot are: Pre-launched VM: kernel/ramdisk/acpi; Service VM: kernel/ramdisk */
 #define MAX_MODULE_NUM (3U * PRE_VM_NUM + 2U * SOS_VM_NUM)
 
 /* The vACPI module size is fixed to 1MB */
@@ -75,9 +75,9 @@ inline static bool is_severity_pass(uint16_t target_vmid)
 }
 
 /**
- * @brief offline vcpu from SOS
+ * @brief offline vcpu from Service VM
 *
- * The function offline specific vcpu from SOS.
+ * The function offlines a specific vcpu from the Service VM.
 *
  * @param vcpu Pointer to vCPU that initiates the hypercall
  * @param param1 lapic id of the vcpu which wants to offline
@@ -85,7 +85,7 @@ inline static bool is_severity_pass(uint16_t target_vmid)
  * @pre is_service_vm(vcpu->vm)
  * @return 0 on success, non-zero on error.
 */
-int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
+int32_t hcall_service_vm_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *target_vm,
                 uint64_t param1, __unused uint64_t param2)
 {
         struct acrn_vcpu *target_vcpu;
@ -93,7 +93,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *t
|
|||||||
int32_t ret = 0;
|
int32_t ret = 0;
|
||||||
uint64_t lapicid = param1;
|
uint64_t lapicid = param1;
|
||||||
|
|
||||||
pr_info("sos offline cpu with lapicid %ld", lapicid);
|
pr_info("Service VM offline cpu with lapicid %ld", lapicid);
|
||||||
|
|
||||||
foreach_vcpu(i, vcpu->vm, target_vcpu) {
|
foreach_vcpu(i, vcpu->vm, target_vcpu) {
|
||||||
if (vlapic_get_apicid(vcpu_vlapic(target_vcpu)) == lapicid) {
|
if (vlapic_get_apicid(vcpu_vlapic(target_vcpu)) == lapicid) {
|
||||||
@ -113,7 +113,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, __unused struct acrn_vm *t
|
|||||||
/**
|
/**
|
||||||
* @brief Get hypervisor api version
|
* @brief Get hypervisor api version
|
||||||
*
|
*
|
||||||
* The function only return api version information when VM is SOS_VM.
|
* The function only return api version information when VM is Service VM.
|
||||||
*
|
*
|
||||||
* @param vcpu Pointer to vCPU that initiates the hypercall
|
* @param vcpu Pointer to vCPU that initiates the hypercall
|
||||||
* @param param1 guest physical memory address. The api version returned
|
* @param param1 guest physical memory address. The api version returned
|
||||||
@ -283,7 +283,7 @@ int32_t hcall_create_vm(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint6
|
|||||||
pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
|
pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
|
||||||
} else {
|
} else {
|
||||||
if (create_vm(vmid, pcpu_bitmap, vm_config, &tgt_vm) == 0) {
|
if (create_vm(vmid, pcpu_bitmap, vm_config, &tgt_vm) == 0) {
|
||||||
/* return a relative vm_id from SOS view */
|
/* return a relative vm_id from Service VM view */
|
||||||
cv.vmid = vmid_2_rel_vmid(vm->vm_id, vmid);
|
cv.vmid = vmid_2_rel_vmid(vm->vm_id, vmid);
|
||||||
cv.vcpu_num = tgt_vm->hw.created_vcpus;
|
cv.vcpu_num = tgt_vm->hw.created_vcpus;
|
||||||
} else {
|
} else {
|
||||||
@ -606,13 +606,13 @@ int32_t hcall_notify_ioreq_finish(__unused struct acrn_vcpu *vcpu, struct acrn_v
|
|||||||
|
|
||||||
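The hypercall walks the Service VM's vCPUs and offlines the one whose LAPIC id matches param1. A standalone sketch of that matching loop (simplified stand-in types; the real code uses foreach_vcpu and the vlapic accessors):

#include <stdint.h>
#include <stdio.h>

#define MAX_VCPU 4U

/* simplified stand-in for the hypervisor's vCPU bookkeeping */
struct vcpu {
	uint64_t lapicid;
	int offlined;
};

/* offline the vCPU whose LAPIC id matches; 0 on success, -1 if none matches */
static int offline_vcpu_by_lapicid(struct vcpu *vcpus, uint64_t lapicid)
{
	uint32_t i;

	for (i = 0U; i < MAX_VCPU; i++) {
		if (vcpus[i].lapicid == lapicid) {
			vcpus[i].offlined = 1;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct vcpu vcpus[MAX_VCPU] = { {0U, 0}, {2U, 0}, {4U, 0}, {6U, 0} };
	printf("ret = %d\n", offline_vcpu_by_lapicid(vcpus, 4U)); /* ret = 0 */
	return 0;
}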
@@ -606,13 +606,13 @@ int32_t hcall_notify_ioreq_finish(__unused struct acrn_vcpu *vcpu, struct acrn_v

 /**
 *@pre is_service_vm(vm)
- *@pre gpa2hpa(vm, region->sos_vm_gpa) != INVALID_HPA
+ *@pre gpa2hpa(vm, region->service_vm_gpa) != INVALID_HPA
 */
 static void add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
 		const struct vm_memory_region *region, uint64_t *pml4_page)
 {
 	uint64_t prot = 0UL, base_paddr;
-	uint64_t hpa = gpa2hpa(vm, region->sos_vm_gpa);
+	uint64_t hpa = gpa2hpa(vm, region->service_vm_gpa);

 	/* access right */
 	if ((region->prot & MEM_ACCESS_READ) != 0U) {

@@ -641,7 +641,7 @@ static void add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
 	/* If Software SRAM is initialized, and HV received a request to map the Software SRAM
 	 * area to a guest, we should add the EPT_WB flag to make Software SRAM effective.
 	 * TODO: We could enforce WB for any region that overlaps with Software SRAM; for simplicity,
-	 * and leave it to SOS to make sure it won't violate.
+	 * leave it to the Service VM to make sure it won't violate that.
 	 */
 	if (is_software_sram_enabled()) {
 		base_paddr = get_software_sram_base();

@@ -667,8 +667,8 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
 	if ((region->size & (PAGE_SIZE - 1UL)) == 0UL) {
 		pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
 		if (region->type == MR_ADD) {
-			/* if the GPA range is SOS valid GPA or not */
-			if (ept_is_valid_mr(vm, region->sos_vm_gpa, region->size)) {
+			/* check whether the GPA range is valid Service VM GPA */
+			if (ept_is_valid_mr(vm, region->service_vm_gpa, region->size)) {
 				/* FIXME: how to filter the alias mapping ? */
 				add_vm_memory_region(vm, target_vm, region, pml4_page);
 				ret = 0;

@@ -682,9 +682,9 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
 	}

 	dev_dbg((ret == 0) ? DBG_LEVEL_HYCALL : LOG_ERROR,
"[vm%d] type=%d gpa=0x%x sos_gpa=0x%x sz=0x%x",
|
"[vm%d] type=%d gpa=0x%x service_vm_gpa=0x%x sz=0x%x",
|
||||||
target_vm->vm_id, region->type, region->gpa,
|
target_vm->vm_id, region->type, region->gpa,
|
||||||
region->sos_vm_gpa, region->size);
|
region->service_vm_gpa, region->size);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
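The region's MEM_ACCESS_* bits are translated into EPT permission bits before the mapping is installed. A standalone sketch of that translation (the flag values here are illustrative, not the hypervisor's real encodings):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative flag values; the hypervisor's real MEM_ACCESS_* / EPT_* encodings differ */
#define MEM_ACCESS_READ		0x1U
#define MEM_ACCESS_WRITE	0x2U
#define MEM_ACCESS_EXEC		0x4U
#define EPT_RD			(1UL << 0U)
#define EPT_WR			(1UL << 1U)
#define EPT_EXE			(1UL << 2U)

/* derive EPT permission bits from a region's access rights */
static uint64_t region_prot_to_ept(uint32_t prot)
{
	uint64_t ept = 0UL;

	if ((prot & MEM_ACCESS_READ) != 0U) {
		ept |= EPT_RD;
	}
	if ((prot & MEM_ACCESS_WRITE) != 0U) {
		ept |= EPT_WR;
	}
	if ((prot & MEM_ACCESS_EXEC) != 0U) {
		ept |= EPT_EXE;
	}
	return ept;
}

int main(void)
{
	printf("RW -> 0x%" PRIx64 "\n", region_prot_to_ept(MEM_ACCESS_READ | MEM_ACCESS_WRITE)); /* 0x3 */
	return 0;
}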
@ -1210,7 +1210,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
|
|||||||
*
|
*
|
||||||
* This is the API that helps to switch the notifer vecotr. If this API is
|
* This is the API that helps to switch the notifer vecotr. If this API is
|
||||||
* not called, the hypervisor will use the default notifier vector(0xF3)
|
* not called, the hypervisor will use the default notifier vector(0xF3)
|
||||||
* to notify the SOS kernel.
|
* to notify the Service VM kernel.
|
||||||
*
|
*
|
||||||
* @param vcpu Pointer to vCPU that initiates the hypercall
|
* @param vcpu Pointer to vCPU that initiates the hypercall
|
||||||
* @param param1 the expected notifier vector from guest
|
* @param param1 the expected notifier vector from guest
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
#include <ticks.h>
|
#include <ticks.h>
|
||||||
|
|
||||||
 /* buf size should be identical to the size in the hvlog option, which is
- * transfered to SOS:
+ * transferred to the Service VM:
 * bsp/uefi/clearlinux/acrn.conf: hvlog=2M@0x1FE00000
 */

@@ -298,7 +298,7 @@ static int32_t profiling_sbuf_put_variable(struct shared_buf *sbuf,
 }

 /*
- * Read profiling data and transferred to SOS
+ * Read profiling data and transfer it to the Service VM
 * Drop the transfer of profiling data if sbuf is full/insufficient, and log it
 */
 static int32_t profiling_generate_data(int32_t collector, uint32_t type)

@@ -1394,7 +1394,7 @@ void profiling_setup(void)

 	dev_dbg(DBG_LEVEL_PROFILING, "%s: entering", __func__);
 	cpu = get_pcpu_id();
-	/* support PMI notification, SOS_VM will register all CPU */
+	/* support PMI notification; the Service VM will register all CPUs */
 	if ((cpu == BSP_CPU_ID) && (profiling_pmi_irq == IRQ_INVALID)) {
 		pr_info("%s: calling request_irq", __func__);
 		retval = request_irq(PMI_IRQ,
@@ -977,7 +977,7 @@ static int32_t shell_to_vm_console(int32_t argc, char **argv)
 		return 0;
 	}
 	console_vmid = vm_id;
-	/* Output that switching to SOS shell */
+	/* Output that we are switching to the Service VM shell */
 	snprintf(temp_str, TEMP_STR_SIZE, "\r\n----- Entering VM %d Shell -----\r\n", vm_id);

 	shell_puts(temp_str);

@@ -70,7 +70,7 @@ static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
 }

 /**
- * @brief Deliver \p io_req to SOS and suspend \p vcpu till its completion
+ * @brief Deliver \p io_req to the Service VM and suspend \p vcpu till its completion
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access

@@ -168,7 +168,7 @@ void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
 	acrn_io_req = &req_buf->req_slot[vcpu_id];
 	/*
 	 * HV will only set processed to ACRN_IOREQ_STATE_PENDING or ACRN_IOREQ_STATE_FREE.
-	 * we don't need to sfence here is that even if the SOS/DM sees the previous state,
+	 * We don't need an sfence here: even if the Service VM/DM sees the previous state,
 	 * the only side effect is that it will defer the processing of the new IOReq.
 	 * It won't lead to wrong processing.
 	 */

@@ -122,7 +122,7 @@ vgsi_to_vioapic_and_vpin(const struct acrn_vm *vm, uint32_t vgsi, uint32_t *vpin

 	if (is_service_vm(vm)) {
 		/*
-		 * Utilize platform ioapic_info for SOS VM
+		 * Utilize platform ioapic_info for the Service VM
 		 */
 		vioapic_index = get_gsi_to_ioapic_index(vgsi);
 		if (vpin != NULL) {

@@ -258,7 +258,7 @@ static void vdev_pt_allow_io_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
 	struct acrn_vm *vm = vpci2vm(vdev->vpci);

-	/* For SOS, all port IO access is allowed by default, so skip SOS here */
+	/* For the Service VM, all port IO access is allowed by default, so skip the Service VM here */
 	if (!is_service_vm(vm)) {
 		struct pci_vbar *vbar = &vdev->vbars[idx];
 		if (vbar->base_gpa != 0UL) {

@@ -276,7 +276,7 @@ static void vdev_pt_deny_io_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
 	struct acrn_vm *vm = vpci2vm(vdev->vpci);

-	/* For SOS, all port IO access is allowed by default, so skip SOS here */
+	/* For the Service VM, all port IO access is allowed by default, so skip the Service VM here */
 	if (!is_service_vm(vm)) {
 		struct pci_vbar *vbar = &vdev->vbars[idx];
 		if (vbar->base_gpa != 0UL) {

@@ -556,7 +556,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
 	}
 	} else {
 		if (vdev->phyfun->vpci != vdev->vpci) {
-			/* VF is assigned to a UOS */
+			/* VF is assigned to a User VM */
 			uint32_t vid, did;

 			vdev->nr_bars = PCI_BAR_COUNT;

@@ -155,7 +155,7 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
 		|| (!mem_aligned_check(base, vdev->vbars[idx].size))) {
 		res = (base < (1UL << 32UL)) ? &(vdev->vpci->res32) : &(vdev->vpci->res64);
 		/* VM tries to reprogram the vbar address out of the pci mmio bar window; it can be caused by:
-		 * 1. For SOS, <board>.xml is misaligned with the actual native platform, and we get wrong mmio window.
+		 * 1. For the Service VM, <board>.xml is misaligned with the actual native platform, and we get a wrong mmio window.
 		 * 2. Malicious operation from the VM: it tries to reprogram the vbar address out of the pci mmio bar window.
 		 */
 		pr_err("%s reprogram PCI:%02x:%02x.%x BAR%d to addr:0x%lx,"

@@ -118,7 +118,7 @@ static void init_vhostbridge(struct pci_vdev *vdev)
 	 */
 		pciexbar_low = UOS_VIRT_PCI_MMCFG_BASE | 0x1U;
 	} else {
-		/* Inject physical ECAM value to SOS vhostbridge since SOS may check PCIe-MMIO Base Address with it */
+		/* Inject the physical ECAM value into the Service VM vhostbridge, since the Service VM may check the PCIe-MMIO Base Address against it */
 		phys_did = pci_pdev_read_cfg(hostbridge_bdf, PCIR_DEVICE, 2);
 		for (i = 0U; i < (sizeof(hostbridge_did_highbytes) / sizeof(uint32_t)); i++) {
 			if (((phys_did & 0xff00U) >> 8) == hostbridge_did_highbytes[i]) {

@@ -107,7 +107,7 @@ static bool vpci_pio_cfgaddr_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
 * @pre vcpu->vm != NULL
 * @pre vcpu->vm->vm_id < CONFIG_MAX_VM_NUM
 * @pre (get_vm_config(vcpu->vm->vm_id)->load_order == PRE_LAUNCHED_VM)
- *	|| (get_vm_config(vcpu->vm->vm_id)->load_order == SOS_VM)
+ *	|| (get_vm_config(vcpu->vm->vm_id)->load_order == SERVICE_VM)
 *
 * @retval true on success.
 * @retval false. (ACRN will deliver this IO request to DM to handle for post-launched VM)

@@ -224,7 +224,7 @@ int32_t init_vpci(struct acrn_vm *vm)
 	vm->iommu = create_iommu_domain(vm->vm_id, hva2hpa(vm->arch_vm.nworld_eptp), 48U);

 	vm_config = get_vm_config(vm->vm_id);
-	/* virtual PCI MMCONFIG for SOS is same with the physical value */
+	/* virtual PCI MMCONFIG for the Service VM is the same as the physical value */
 	if (vm_config->load_order == SOS_VM) {
 		pci_mmcfg = get_mmcfg_region();
 		vm->vpci.pci_mmcfg = *pci_mmcfg;

@@ -341,7 +341,7 @@ static void remove_vdev_pt_iommu_domain(const struct pci_vdev *vdev)
 * @brief Find an available vdev structure with BDF from a specified vpci structure.
 * If the vdev's vpci is the same as the specified vpci, the vdev is available.
 * If the vdev's vpci is not the same as the specified vpci, the vdev has already
- * been assigned and it is unavailable for SOS.
+ * been assigned and is unavailable to the Service VM.
 * If the vdev's vpci is NULL, the vdev is an orphan/zombie instance; it can't
 * be accessed by any vpci.
 *

@@ -358,7 +358,7 @@ static struct pci_vdev *find_available_vdev(struct acrn_vpci *vpci, union pci_bd

 	if ((vdev != NULL) && (vdev->user != vdev)) {
 		if (vdev->user != NULL) {
-			/* the SOS is able to access, if and only if the SOS has higher severity than the UOS. */
+			/* the Service VM is able to access it if and only if the Service VM has higher severity than the User VM. */
 			if (get_vm_severity(vpci2vm(vpci)->vm_id) <
 					get_vm_severity(vpci2vm(vdev->user->vpci)->vm_id)) {
 				vdev = NULL;

@@ -693,7 +693,7 @@ static int32_t vpci_init_vdevs(struct acrn_vm *vm)
 }

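The severity check above denies access whenever the requesting VM's severity is lower than that of the vdev's current user. A standalone sketch of the rule (severity values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* illustrative severity values; the real ones come from the VM configuration */
static uint32_t get_vm_severity(uint16_t vm_id)
{
	return (vm_id == 0U) ? 50U : 30U; /* vm0 = Service VM, higher severity */
}

/* a VM may touch a device already assigned to another VM
 * only if its severity is not lower than the current user's */
static int can_access(uint16_t accessor_vm, uint16_t user_vm)
{
	return get_vm_severity(accessor_vm) >= get_vm_severity(user_vm);
}

int main(void)
{
	printf("Service VM -> post-launched user: %d\n", can_access(0U, 1U)); /* 1 */
	printf("post-launched -> Service VM user: %d\n", can_access(1U, 0U)); /* 0 */
	return 0;
}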
 /**
- * @brief assign a PCI device from SOS to target post-launched VM.
+ * @brief assign a PCI device from the Service VM to a target post-launched VM.
 *
 * @pre tgt_vm != NULL
 * @pre pcidev != NULL

@@ -702,41 +702,41 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_pcidev *pcidev)
 {
 	int32_t ret = 0;
 	uint32_t idx;
-	struct pci_vdev *vdev_in_sos, *vdev;
+	struct pci_vdev *vdev_in_service_vm, *vdev;
 	struct acrn_vpci *vpci;
 	union pci_bdf bdf;
-	struct acrn_vm *sos_vm;
+	struct acrn_vm *service_vm;

 	bdf.value = pcidev->phys_bdf;
-	sos_vm = get_sos_vm();
-	spinlock_obtain(&sos_vm->vpci.lock);
-	vdev_in_sos = pci_find_vdev(&sos_vm->vpci, bdf);
-	if ((vdev_in_sos != NULL) && (vdev_in_sos->user == vdev_in_sos) &&
-			(vdev_in_sos->pdev != NULL) &&
-			!is_host_bridge(vdev_in_sos->pdev) && !is_bridge(vdev_in_sos->pdev)) {
+	service_vm = get_service_vm();
+	spinlock_obtain(&service_vm->vpci.lock);
+	vdev_in_service_vm = pci_find_vdev(&service_vm->vpci, bdf);
+	if ((vdev_in_service_vm != NULL) && (vdev_in_service_vm->user == vdev_in_service_vm) &&
+			(vdev_in_service_vm->pdev != NULL) &&
+			!is_host_bridge(vdev_in_service_vm->pdev) && !is_bridge(vdev_in_service_vm->pdev)) {

 		/* ToDo: Each PT device must support one type reset */
-		if (!vdev_in_sos->pdev->has_pm_reset && !vdev_in_sos->pdev->has_flr &&
-				!vdev_in_sos->pdev->has_af_flr) {
+		if (!vdev_in_service_vm->pdev->has_pm_reset && !vdev_in_service_vm->pdev->has_flr &&
+				!vdev_in_service_vm->pdev->has_af_flr) {
 			pr_fatal("%s %x:%x.%x not support FLR or not support PM reset\n",
 				__func__, bdf.bits.b, bdf.bits.d, bdf.bits.f);
 		} else {
 			/* DM will reset this device before assigning it */
-			pdev_restore_bar(vdev_in_sos->pdev);
+			pdev_restore_bar(vdev_in_service_vm->pdev);
 		}

-		vdev_in_sos->vdev_ops->deinit_vdev(vdev_in_sos);
+		vdev_in_service_vm->vdev_ops->deinit_vdev(vdev_in_service_vm);

 		vpci = &(tgt_vm->vpci);

 		spinlock_obtain(&tgt_vm->vpci.lock);
-		vdev = vpci_init_vdev(vpci, vdev_in_sos->pci_dev_config, vdev_in_sos->phyfun);
+		vdev = vpci_init_vdev(vpci, vdev_in_service_vm->pci_dev_config, vdev_in_service_vm->phyfun);
 		pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_LINE, 1U, pcidev->intr_line);
 		pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_PIN, 1U, pcidev->intr_pin);
 		for (idx = 0U; idx < vdev->nr_bars; idx++) {
 			/* VF is assigned to a UOS */
 			if (vdev->phyfun != NULL) {
-				vdev->vbars[idx] = vdev_in_sos->vbars[idx];
+				vdev->vbars[idx] = vdev_in_service_vm->vbars[idx];
 				if (has_msix_cap(vdev) && (idx == vdev->msix.table_bar)) {
 					vdev->msix.mmio_hpa = vdev->vbars[idx].base_hpa;
 					vdev->msix.mmio_size = vdev->vbars[idx].size;

@@ -753,11 +753,11 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_pcidev *pcidev)
 		/* We should re-add the vdev to the hashlist since its vbdf has changed */
 		hlist_del(&vdev->link);
 		hlist_add_head(&vdev->link, &vpci->vdevs_hlist_heads[hash64(vdev->bdf.value, VDEV_LIST_HASHBITS)]);
-		vdev->parent_user = vdev_in_sos;
-		vdev_in_sos->user = vdev;
+		vdev->parent_user = vdev_in_service_vm;
+		vdev_in_service_vm->user = vdev;
 	} else {
 		vdev->vdev_ops->deinit_vdev(vdev);
-		vdev_in_sos->vdev_ops->init_vdev(vdev_in_sos);
+		vdev_in_service_vm->vdev_ops->init_vdev(vdev_in_service_vm);
 	}
 	spinlock_release(&tgt_vm->vpci.lock);
 	} else {

@@ -767,13 +767,13 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_pcidev *pcidev)
 			pcidev->virt_bdf >> 8U, (pcidev->virt_bdf >> 3U) & 0x1fU, pcidev->virt_bdf & 0x7U);
 		ret = -ENODEV;
 	}
-	spinlock_release(&sos_vm->vpci.lock);
+	spinlock_release(&service_vm->vpci.lock);

 	return ret;
 }

 /**
- * @brief deassign a PCI device from target post-launched VM to SOS.
+ * @brief deassign a PCI device from a target post-launched VM to the Service VM.
 *
 * @pre tgt_vm != NULL
 * @pre pcidev != NULL

@@ -42,7 +42,7 @@
 * 1. configure tool can select whether a PCI bridge is emulated or passed through
 *
 * Open:
- * 1. SOS how to reset PCI devices under the PCI bridge
+ * 1. how the Service VM resets PCI devices under the PCI bridge
 */

 #include <asm/guest/vm.h>

@@ -216,7 +216,7 @@ static void enable_vfs(struct pci_vdev *pf_vdev)
 	} else {
 		/*
 		 * If the VF physical device was not created successfully, the pdev/vdev
-		 * will also not be created so that SOS can aware of VF creation failure,
+		 * will also not be created, so that the Service VM is aware of the VF creation failure.
 		 */
 		pr_err("PF %x:%x.%x can't create VFs after 100 ms",
 			pf_vdev->bdf.bits.b, pf_vdev->bdf.bits.d, pf_vdev->bdf.bits.f);

@@ -238,7 +238,7 @@ static void disable_vfs(struct pci_vdev *pf_vdev)
 * we simply set the VF instance status to "zombie" to avoid dynamically adding/removing
 * resources
 *
- * If the VF drivers are still running in SOS or UOS, the MMIO access will return 0xFF.
+ * If the VF drivers are still running in the Service VM or a User VM, the MMIO access will return 0xFF.
 */
 num_vfs = read_sriov_reg(pf_vdev, PCIR_SRIOV_NUMVFS);
 first = read_sriov_reg(pf_vdev, PCIR_SRIOV_FST_VF_OFF);

@@ -327,7 +327,7 @@ static uint32_t vpin_to_vgsi(const struct acrn_vm *vm, uint32_t vpin)

 	if (is_service_vm(vm)) {
 		/*
-		 * For SOS VM vPIC pin to GSI is same as the one
+		 * For the Service VM, the vPIC pin to GSI mapping is the same as the one
 		 * that is used for the platform
 		 */
 		vgsi = get_pic_pin_from_ioapic_pin(vpin);

@@ -363,7 +363,7 @@ static uint32_t vgsi_to_vpin(const struct acrn_vm *vm, uint32_t vgsi)

 	if (is_service_vm(vm)) {
 		/*
-		 * For SOS VM vPIC pin to GSI is same as the one
+		 * For the Service VM, the vPIC pin to GSI mapping is the same as the one
 		 * that is used for the platform
 		 */
 		vpin = get_pic_pin_from_ioapic_pin(vgsi);

@@ -558,7 +558,7 @@ static void pci_parse_iommu_devscopes(struct pci_bdf_mapping_group *const bdfs_f
 }

 /*
- * There are some rules to config PCI bridge: try to avoid interference between SOS and RTVM or
+ * There are some rules to configure a PCI bridge: try to avoid interference between the Service VM and an RTVM or
 * pre-launched VM, and to support some features like SRIOV by default, as follows:
 * 1. disable interrupt, including INTx and MSI.
 * 2. enable ARI if it's a PCIe bridge and all its sub devices support ARI (need check further).

@@ -600,7 +600,7 @@ static void config_pci_bridge(const struct pci_pdev *pdev)
 /*
 * @brief: walks through all pdevs that have been initialized and determines
 * which pdevs need to be added to pci dev_config. The pdevs added to pci
- * dev_config will be exposed to SOS finally.
+ * dev_config will finally be exposed to the Service VM.
 */
 static void init_all_dev_config(void)
 {

@@ -55,8 +55,8 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ctlr vgsi_c
 * @return
 *	- 0: on success
 *	- \p -ENODEV:
- *		- for SOS, the entry already be held by others
- *		- for UOS, no pre-hold mapping found.
+ *		- for the Service VM, the entry is already held by others
+ *		- for a User VM, no pre-hold mapping was found.
 *
 * @pre vm != NULL
 * @pre info != NULL

@@ -79,8 +79,8 @@ int32_t ptirq_prepare_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_
 * @return
 *	- 0: on success
 *	- \p -ENODEV:
- *		- for SOS, the entry already be held by others
- *		- for UOS, no pre-hold mapping found.
+ *		- for the Service VM, the entry is already held by others
+ *		- for a User VM, no pre-hold mapping was found.
 *
 * @pre vm != NULL
 *

@@ -90,7 +90,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
 /**
 * @brief Add an interrupt remapping entry for INTx as pre-hold mapping.
 *
- * Except sos_vm, Device Model should call this function to pre-hold ptdev intx
+ * Except for the Service VM, the Device Model should call this function to pre-hold a ptdev intx.
 * The entry is identified by phys_pin; one entry vs. one phys_pin.
 * Currently, one phys_pin can only be held by one pin source (vPIC or vIOAPIC).
 *

@@ -64,13 +64,13 @@ uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa);
 */
 uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size);
 /**
- * @brief Translating from host-physical address to guest-physical address for SOS_VM
+ * @brief Translate a host-physical address to a guest-physical address for the Service VM
 *
 * @param[in] hpa the specified host-physical address
 *
- * @pre: the gpa and hpa are identical mapping in SOS.
+ * @pre: the gpa and hpa are an identity mapping in the Service VM.
 */
-uint64_t sos_vm_hpa2gpa(uint64_t hpa);
+uint64_t service_vm_hpa2gpa(uint64_t hpa);
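Since the Service VM's guest-physical space is assumed to be identity-mapped, the translation is trivial. A standalone sketch under that 1:1 assumption (the real function may apply a fixed offset on some configurations):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* identity-mapping sketch: with a 1:1 Service VM layout the gpa is the hpa itself */
static uint64_t service_vm_hpa2gpa(uint64_t hpa)
{
	return hpa;
}

int main(void)
{
	printf("gpa = 0x%" PRIx64 "\n", service_vm_hpa2gpa(0x1FE00000UL));
	return 0;
}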
 /**
 * @brief Guest-physical memory region mapping
 *

@@ -215,13 +215,13 @@ static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_
 }

 /* Convert a relative vm id to an absolute vm id */
-static inline uint16_t rel_vmid_2_vmid(uint16_t sos_vmid, uint16_t rel_vmid) {
-	return (sos_vmid + rel_vmid);
+static inline uint16_t rel_vmid_2_vmid(uint16_t service_vmid, uint16_t rel_vmid) {
+	return (service_vmid + rel_vmid);
 }

 /* Convert an absolute vm id to a relative vm id */
-static inline uint16_t vmid_2_rel_vmid(uint16_t sos_vmid, uint16_t vmid) {
-	return (vmid - sos_vmid);
+static inline uint16_t vmid_2_rel_vmid(uint16_t service_vmid, uint16_t vmid) {
+	return (vmid - service_vmid);
 }

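A quick standalone usage example of the two conversions above (assuming, hypothetically, that the Service VM is vm1, so relative vm2 maps to absolute vm3):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t rel_vmid_2_vmid(uint16_t service_vmid, uint16_t rel_vmid) {
	return (uint16_t)(service_vmid + rel_vmid);
}

static inline uint16_t vmid_2_rel_vmid(uint16_t service_vmid, uint16_t vmid) {
	return (uint16_t)(vmid - service_vmid);
}

int main(void)
{
	uint16_t service_vmid = 1U; /* hypothetical: the Service VM is vm1 */
	uint16_t abs_id = rel_vmid_2_vmid(service_vmid, 2U); /* relative vm2 -> absolute vm3 */

	printf("abs=%u rel=%u\n", (unsigned)abs_id, (unsigned)vmid_2_rel_vmid(service_vmid, abs_id));
	return 0;
}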
 void make_shutdown_vm_request(uint16_t pcpu_id);

@@ -243,9 +243,9 @@ bool is_postlaunched_vm(const struct acrn_vm *vm);
 bool is_prelaunched_vm(const struct acrn_vm *vm);
 uint16_t get_vmid_by_uuid(const uint8_t *uuid);
 struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
-struct acrn_vm *get_sos_vm(void);
+struct acrn_vm *get_service_vm(void);

-void create_sos_vm_e820(struct acrn_vm *vm);
+void create_service_vm_e820(struct acrn_vm *vm);
 void create_prelaunched_vm_e820(struct acrn_vm *vm);
 uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_addr, uint64_t max_addr);

@@ -81,7 +81,7 @@ void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes);
 void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes);

 /**
- * @brief Fire HSM interrupt to SOS
+ * @brief Fire an HSM interrupt to the Service VM
 *
 * @return None
 */

@@ -179,7 +179,7 @@ struct acrn_vm_config {

 /*
 * below are variable length members (per build).
- * SOS can get the vm_configs[] array through hypercall, but SOS may not
+ * The Service VM can get the vm_configs[] array through a hypercall, but the Service VM may not
 * need to parse these members.
 */

@@ -35,7 +35,7 @@ bool is_hypercall_from_ring0(void);
 * @pre is_service_vm(vcpu->vm)
 * @return 0 on success, non-zero on error.
 */
-int32_t hcall_sos_offline_cpu(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);
+int32_t hcall_service_vm_offline_cpu(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2);

 /**
 * @brief Get hypervisor api version

@@ -172,7 +172,7 @@ struct mem_io_node {
 /* External Interfaces */

 /**
- * @brief Deliver \p io_req to SOS and suspend \p vcpu till its completion
+ * @brief Deliver \p io_req to the Service VM and suspend \p vcpu till its completion
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 * @param io_req The I/O request holding the details of the MMIO access

@@ -44,7 +44,7 @@
 #define VIOAPIC_BASE	0xFEC00000UL
 #define VIOAPIC_SIZE	4096UL

-#define REDIR_ENTRIES_HW	120U /* SOS align with native ioapic */
+#define REDIR_ENTRIES_HW	120U /* Service VM aligns with the native ioapic */
 #define STATE_BITMAP_SIZE	INT_DIV_ROUNDUP(REDIR_ENTRIES_HW, 64U)
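STATE_BITMAP_SIZE works out to ceil(120/64) = 2 sixty-four-bit words. A standalone check (the INT_DIV_ROUNDUP definition shown is an assumption, written to match its name):

#include <stdio.h>

#define INT_DIV_ROUNDUP(x, y)	(((x) + (y) - 1U) / (y)) /* assumed definition */
#define REDIR_ENTRIES_HW	120U
#define STATE_BITMAP_SIZE	INT_DIV_ROUNDUP(REDIR_ENTRIES_HW, 64U)

int main(void)
{
	/* 120 RTEs need ceil(120/64) = 2 64-bit words of state bitmap */
	printf("STATE_BITMAP_SIZE = %u\n", STATE_BITMAP_SIZE);
	return 0;
}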

 #define IOAPIC_RTE_LOW_INTVEC	((uint32_t)IOAPIC_RTE_INTVEC)

@@ -140,12 +140,12 @@ struct pci_vdev {
 	const struct pci_vdev_ops *vdev_ops;

 	/*
-	 * vdev in     | HV         | pre-VM         | SOS              | post-VM
-	 *             |            |                | vdev used by SOS | vdev used by post-VM |
+	 * vdev in     | HV         | pre-VM         | Service VM              | post-VM
+	 *             |            |                | vdev used by Service VM | vdev used by post-VM |
 	 * ----------------------------------------------------------------------------------------------------------
-	 * parent_user | NULL(HV)   | NULL(HV)       | NULL(HV)         | NULL(HV)        | vdev in SOS
+	 * parent_user | NULL(HV)   | NULL(HV)       | NULL(HV)         | NULL(HV)        | vdev in Service VM
 	 * ----------------------------------------------------------------------------------------------------------
-	 * user        | vdev in HV | vdev in pre-VM | vdev in SOS      | vdev in post-VM | vdev in post-VM
+	 * user        | vdev in HV | vdev in pre-VM | vdev in Service VM | vdev in post-VM | vdev in post-VM
 	 */
 	struct pci_vdev *parent_user;
 	struct pci_vdev *user; /* NULL means this device is not used or is a zombie VF */
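The table above boils down to two pointers per vdev. A standalone sketch of the ownership states they encode (simplified structure, illustrative only):

#include <stdio.h>

struct pci_vdev {
	struct pci_vdev *parent_user; /* the Service VM instance this vdev shadows, if any */
	struct pci_vdev *user;        /* NULL: unused or zombie VF; self: owned locally */
};

static const char *vdev_state(const struct pci_vdev *v)
{
	if (v->user == NULL) {
		return "unused/zombie";
	}
	return (v->user == v) ? "used by its own VM" : "assigned to another VM";
}

int main(void)
{
	struct pci_vdev service_vm_dev, post_vm_dev;

	post_vm_dev.parent_user = &service_vm_dev;
	post_vm_dev.user = &post_vm_dev;
	service_vm_dev.parent_user = NULL;
	service_vm_dev.user = &post_vm_dev; /* handed over to the post-launched VM */
	printf("service vm vdev: %s\n", vdev_state(&service_vm_dev));
	printf("post vm vdev: %s\n", vdev_state(&post_vm_dev));
	return 0;
}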

@@ -193,11 +193,11 @@ struct acrn_pci_request {
 * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
 *
 * When a request is in COMPLETE or FREE state, the request is owned by the
- * hypervisor. SOS (HSM or DM) shall not read or write the internals of the
+ * hypervisor. The Service VM (HSM or DM) shall not read or write the internals of the
 * request except the state.
 *
 * When a request is in PENDING or PROCESSING state, the request is owned by
- * SOS. The hypervisor shall not read or write the request other than the state.
+ * the Service VM. The hypervisor shall not read or write the request other than the state.
 *
 * Based on the rules above, a typical I/O request lifecycle should look like
 * the following.

@@ -205,7 +205,7 @@ struct acrn_pci_request {
 * @verbatim embed:rst:leading-asterisk
 *
 * +-----------------------+-------------------------+----------------------+
- * | SOS vCPU 0            | SOS vCPU x              | UOS vCPU y           |
+ * | Service VM vCPU 0     | Service VM vCPU x       | User VM vCPU y       |
 * +=======================+=========================+======================+
 * |                       |                         | Hypervisor:          |
 * |                       |                         |                      |

@@ -215,7 +215,7 @@ struct acrn_pci_request {
 * |                       |                         | - Set state to       |
 * |                       |                         |   PENDING (a)        |
 * |                       |                         | - Fire upcall to     |
- * |                       |                         |   SOS vCPU 0         |
+ * |                       |                         |   Service VM vCPU 0  |
 * |                       |                         |                      |
 * +-----------------------+-------------------------+----------------------+
 * | HSM:                  |                         |                      |

@@ -268,7 +268,7 @@ struct acrn_pci_request {
 * the hypervisor, as the hypervisor shall not access the request any more.
 *
 * 2. Due to similar reasons, setting state to COMPLETE is the last operation
- * of request handling in HSM or clients in SOS.
+ * of request handling in HSM or clients in the Service VM.
 */
 struct acrn_io_request {
 	/**
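The lifecycle above is a four-state machine cycling FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE. A standalone sketch of its legal transitions:

#include <stdio.h>

enum ioreq_state { FREE, PENDING, PROCESSING, COMPLETE };

/* legal transitions of the request state machine described above */
static int transition_ok(enum ioreq_state from, enum ioreq_state to)
{
	return (from == FREE && to == PENDING) ||        /* hypervisor posts a request      */
	       (from == PENDING && to == PROCESSING) ||  /* Service VM (HSM/DM) claims it   */
	       (from == PROCESSING && to == COMPLETE) || /* Service VM finishes handling it */
	       (from == COMPLETE && to == FREE);         /* hypervisor reclaims the slot    */
}

int main(void)
{
	printf("%d %d\n", transition_ok(FREE, PENDING), transition_ok(PENDING, COMPLETE)); /* 1 0 */
	return 0;
}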
@ -578,7 +578,7 @@ struct acrn_intr_monitor {
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* PRE_LAUNCHED_VM is launched by ACRN hypervisor, with LAPIC_PT;
|
* PRE_LAUNCHED_VM is launched by ACRN hypervisor, with LAPIC_PT;
|
||||||
- * SOS_VM is launched by ACRN hypervisor, without LAPIC_PT;
+ * the Service VM is launched by ACRN hypervisor, without LAPIC_PT;
 * POST_LAUNCHED_VM is launched by the ACRN devicemodel, with/without LAPIC_PT depending on the use case.
 *
 * Assumption: vm_configs array is completely initialized w.r.t. load_order member of

@@ -587,7 +587,7 @@ struct acrn_intr_monitor {
 enum acrn_vm_load_order {
 	PRE_LAUNCHED_VM = 0,
 	SOS_VM,
-	POST_LAUNCHED_VM,	/* Launched by Devicemodel in SOS_VM */
+	POST_LAUNCHED_VM,	/* Launched by the Device Model in the Service VM */
 	MAX_LOAD_ORDER
 };

@@ -27,7 +27,7 @@
 /* general */
 #define HC_ID_GEN_BASE			0x0UL
 #define HC_GET_API_VERSION		BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x00UL)
-#define HC_SOS_OFFLINE_CPU		BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01UL)
+#define HC_SERVICE_VM_OFFLINE_CPU	BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01UL)
 #define HC_SET_CALLBACK_VECTOR		BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x02UL)
 #define HC_GET_PLATFORM_INFO		BASE_HC_ID(HC_ID, HC_ID_GEN_BASE + 0x03UL)

@@ -131,8 +131,8 @@ struct vm_memory_region {
 	/** the beginning guest physical address of the memory region */
 	uint64_t gpa;

-	/** SOS_VM's guest physical address which gpa will be mapped to */
-	uint64_t sos_vm_gpa;
+	/** the Service VM's guest physical address which gpa will be mapped to */
+	uint64_t service_vm_gpa;

 	/** size of the memory region */
 	uint64_t size;

@@ -337,7 +337,7 @@ struct acrn_platform_info {
 	*/
 	uint64_t vm_configs_addr;

-	/** Maximum Kata container number in SOS VM */
+	/** Maximum Kata container number in the Service VM */
 	uint64_t max_kata_containers;
 	/** Align the size of Configuration info to 128Bytes. */
 	uint8_t reserved[104];