ACRN: hv: Terminology modification in hv code

Rename sos_vm to service_vm.
rename sos_vmid to service_vmid.
rename sos_vm_ptr to service_vm_ptr.
rename get_sos_vm to get_service_vm.
rename sos_vm_gpa to service_vm_gpa.
rename sos_vm_e820 to service_vm_e820.
rename sos_efi_info to service_vm_efi_info.
rename sos_vm_config to service_vm_config.
rename sos_vm_hpa2gpa to service_vm_hpa2gpa.
rename vdev_in_sos to vdev_in_service_vm.
rename create_sos_vm_e820 to create_service_vm_e820.
rename sos_high64_max_ram to service_vm_high64_max_ram.
rename prepare_sos_vm_memmap to prepare_service_vm_memmap.
rename post_uos_sworld_memory to post_user_vm_sworld_memory.
rename hcall_sos_offline_cpu to hcall_service_vm_offline_cpu.
rename filter_mem_from_sos_e820 to filter_mem_from_service_vm_e820.
rename create_sos_vm_efi_mmap_desc to create_service_vm_efi_mmap_desc.
rename HC_SOS_OFFLINE_CPU to HC_SERVICE_VM_OFFLINE_CPU.
rename SOS to Service VM in comment messages.

Tracked-On: #6744
Signed-off-by: Liu Long <long.liu@linux.intel.com>
Reviewed-by: Geoffroy Van Cutsem <geoffroy.vancutsem@intel.com>
This commit is contained in:
Liu Long
2021-10-29 14:05:18 +08:00
committed by wenlingz
parent 26e507a06e
commit 92b7d6a9a3
45 changed files with 220 additions and 219 deletions

View File

@@ -65,7 +65,7 @@ struct acrn_vm_pci_dev_config *init_one_dev_config(struct pci_pdev *pdev)
dev_config = &vm_config->pci_devs[vm_config->pci_dev_num];
if (is_hv_owned_pdev(pdev->bdf)) {
/* SOS need to emulate the type1 pdevs owned by HV */
/* Service VM need to emulate the type1 pdevs owned by HV */
dev_config->emu_type = PCI_DEV_TYPE_SOSEMUL;
if (is_bridge(pdev)) {
dev_config->vdev_ops = &vpci_bridge_ops;

View File

@@ -16,7 +16,7 @@
/*
* e820.c contains the related e820 operations; like HV to get memory info for its MMU setup;
* and hide HV memory from SOS_VM...
* and hide HV memory from Service VM...
*/
static uint32_t hv_e820_entries_nr;
@@ -26,7 +26,7 @@ static struct e820_entry hv_e820[E820_MAX_ENTRIES];
#define DBG_LEVEL_E820 6U
/*
* @brief reserve some RAM, hide it from sos_vm, return its start address
* @brief reserve some RAM, hide it from Service VM, return its start address
* @param size_arg Amount of memory to be found and marked reserved
* @param max_addr Maximum address below which memory is to be identified
*

View File

@@ -290,7 +290,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
/* add msix entry for a vm, based on msi id (phys_bdf+msix_index)
* - if the entry not be added by any vm, allocate it
* - if the entry already be added by sos_vm, then change the owner to current vm
* - if the entry already be added by Service VM, then change the owner to current vm
* - if the entry already be added by other vm, return NULL
*/
static struct ptirq_remapping_info *add_msix_remapping(struct acrn_vm *vm,
@@ -351,7 +351,7 @@ remove_msix_remapping(const struct acrn_vm *vm, uint16_t phys_bdf, uint32_t entr
/* add intx entry for a vm, based on intx id (phys_pin)
* - if the entry not be added by any vm, allocate it
* - if the entry already be added by sos_vm, then change the owner to current vm
* - if the entry already be added by Service VM, then change the owner to current vm
* - if the entry already be added by other vm, return NULL
*/
static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint32_t virt_gsi,
@@ -398,7 +398,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
/*
* ptirq entry is either created or transferred from SOS VM to Post-launched VM
* ptirq entry is either created or transferred from Service VM to Post-launched VM
*/
if (entry != NULL) {
@@ -781,7 +781,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_gsi, enum intx_ct
}
/* @pre vm != NULL
* except sos_vm, Device Model should call this function to pre-hold ptdev intx
* except Service VM, Device Model should call this function to pre-hold ptdev intx
* entries:
* - the entry is identified by phys_pin:
* one entry vs. one phys_pin

View File

@@ -184,8 +184,8 @@ void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
}
}
/*
* To enable the identical map and support of legacy devices/ACPI method in SOS,
* ACRN presents the entire host 0-4GB memory region to SOS, except the memory
* To enable the identical map and support of legacy devices/ACPI method in Service VM,
* ACRN presents the entire host 0-4GB memory region to Service VM, except the memory
* regions explicitly assigned to pre-launched VMs or HV (DRAM and MMIO). However,
* virtual e820 only contains the known DRAM regions. For this reason,
* we can't know if the GPA range is guest valid or not, by checking with
@@ -260,9 +260,9 @@ uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa)
}
/**
* @pre: the gpa and hpa are identical mapping in SOS.
* @pre: the gpa and hpa are identical mapping in Service VM.
*/
uint64_t sos_vm_hpa2gpa(uint64_t hpa)
uint64_t service_vm_hpa2gpa(uint64_t hpa)
{
return hpa;
}

View File

@@ -183,7 +183,7 @@ static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
guest_wakeup_vec32 = *(vm->pm.sx_state_data->wake_vector_32);
clac();
pause_vm(vm); /* pause sos_vm before suspend system */
pause_vm(vm); /* pause Service VM before suspend system */
host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
resume_vm_from_s3(vm, guest_wakeup_vec32); /* jump back to vm */
put_vm_lock(vm);

View File

@@ -18,7 +18,7 @@
#define ENTRY_HPA1_LOW_PART2 5U
#define ENTRY_HPA1_HI 9U
static struct e820_entry sos_vm_e820[E820_MAX_ENTRIES];
static struct e820_entry service_vm_e820[E820_MAX_ENTRIES];
static struct e820_entry pre_vm_e820[PRE_VM_NUM][E820_MAX_ENTRIES];
uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_addr, uint64_t max_addr)
@@ -70,7 +70,7 @@ static void sort_vm_e820(struct acrn_vm *vm)
}
}
static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint64_t end_pa)
static void filter_mem_from_service_vm_e820(struct acrn_vm *vm, uint64_t start_pa, uint64_t end_pa)
{
uint32_t i;
uint64_t entry_start;
@@ -79,7 +79,7 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
struct e820_entry *entry, new_entry = {0};
for (i = 0U; i < entries_count; i++) {
entry = &sos_vm_e820[i];
entry = &service_vm_e820[i];
entry_start = entry->baseaddr;
entry_end = entry->baseaddr + entry->length;
@@ -121,7 +121,7 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
if (new_entry.length > 0UL) {
entries_count++;
ASSERT(entries_count <= E820_MAX_ENTRIES, "e820 entry overflow");
entry = &sos_vm_e820[entries_count - 1U];
entry = &service_vm_e820[entries_count - 1U];
entry->baseaddr = new_entry.baseaddr;
entry->length = new_entry.length;
entry->type = new_entry.type;
@@ -131,46 +131,46 @@ static void filter_mem_from_sos_e820(struct acrn_vm *vm, uint64_t start_pa, uint
}
/**
* before boot sos_vm(service OS), call it to hide HV and prelaunched VM memory in e820 table from sos_vm
* before boot Service VM, call it to hide HV and prelaunched VM memory in e820 table from Service VM
*
* @pre vm != NULL
*/
void create_sos_vm_e820(struct acrn_vm *vm)
void create_service_vm_e820(struct acrn_vm *vm)
{
uint16_t vm_id, i;
uint64_t hv_start_pa = hva2hpa((void *)(get_hv_image_base()));
uint64_t hv_end_pa = hv_start_pa + get_hv_ram_size();
uint32_t entries_count = get_e820_entries_count();
struct acrn_vm_config *sos_vm_config = get_vm_config(vm->vm_id);
struct acrn_vm_config *service_vm_config = get_vm_config(vm->vm_id);
(void)memcpy_s((void *)sos_vm_e820, entries_count * sizeof(struct e820_entry),
(void)memcpy_s((void *)service_vm_e820, entries_count * sizeof(struct e820_entry),
(const void *)get_e820_entry(), entries_count * sizeof(struct e820_entry));
vm->e820_entry_num = entries_count;
vm->e820_entries = sos_vm_e820;
vm->e820_entries = service_vm_e820;
/* filter out hv memory from e820 table */
filter_mem_from_sos_e820(vm, hv_start_pa, hv_end_pa);
filter_mem_from_service_vm_e820(vm, hv_start_pa, hv_end_pa);
/* filter out prelaunched vm memory from e820 table */
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
struct acrn_vm_config *vm_config = get_vm_config(vm_id);
if (vm_config->load_order == PRE_LAUNCHED_VM) {
filter_mem_from_sos_e820(vm, vm_config->memory.start_hpa,
filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa,
vm_config->memory.start_hpa + vm_config->memory.size);
/* if HPA2 is available, filter it out as well*/
if (vm_config->memory.size_hpa2 != 0UL) {
filter_mem_from_sos_e820(vm, vm_config->memory.start_hpa2,
filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa2,
vm_config->memory.start_hpa2 + vm_config->memory.size_hpa2);
}
}
}
for (i = 0U; i < vm->e820_entry_num; i++) {
struct e820_entry *entry = &sos_vm_e820[i];
struct e820_entry *entry = &service_vm_e820[i];
if ((entry->type == E820_TYPE_RAM)) {
sos_vm_config->memory.size += entry->length;
service_vm_config->memory.size += entry->length;
}
}
sort_vm_e820(vm);

View File

@@ -38,7 +38,7 @@ static uint64_t sept_page_bitmap[TOTAL_SEPT_4K_PAGES_NUM / 64U];
/*
* @brief Reserve space for SEPT 4K pages from platform E820 table
* At moment, we only support nested VMX for SOS VM.
* At moment, we only support nested VMX for Service VM.
*/
void reserve_buffer_for_sept_pages(void)
{

View File

@@ -1889,7 +1889,7 @@ static int32_t inject_msi_for_non_lapic_pt(struct acrn_vm *vm, uint64_t addr, ui
}
/**
*@pre Pointer vm shall point to SOS_VM
*@pre Pointer vm shall point to Service VM
*/
static void inject_msi_for_lapic_pt(struct acrn_vm *vm, uint64_t addr, uint64_t data)
{

View File

@@ -46,15 +46,15 @@
/* Local variables */
/* pre-assumption: TRUSTY_RAM_SIZE is 2M aligned */
static struct page post_uos_sworld_memory[MAX_POST_VM_NUM][TRUSTY_RAM_SIZE >> PAGE_SHIFT] __aligned(MEM_2M);
static struct page post_user_vm_sworld_memory[MAX_POST_VM_NUM][TRUSTY_RAM_SIZE >> PAGE_SHIFT] __aligned(MEM_2M);
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE);
static struct acrn_vm *sos_vm_ptr = NULL;
static struct acrn_vm *service_vm_ptr = NULL;
void *get_sworld_memory_base(void)
{
return post_uos_sworld_memory;
return post_user_vm_sworld_memory;
}
uint16_t get_vmid_by_uuid(const uint8_t *uuid)
@@ -229,12 +229,12 @@ struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
return &vm_array[vm_id];
}
/* return a pointer to the virtual machine structure of SOS VM */
struct acrn_vm *get_sos_vm(void)
/* return a pointer to the virtual machine structure of Service VM */
struct acrn_vm *get_service_vm(void)
{
ASSERT(sos_vm_ptr != NULL, "sos_vm_ptr is NULL");
ASSERT(service_vm_ptr != NULL, "service_vm_ptr is NULL");
return sos_vm_ptr;
return service_vm_ptr;
}
/**
@@ -319,14 +319,14 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
}
}
static void deny_pci_bar_access(struct acrn_vm *sos, const struct pci_pdev *pdev)
static void deny_pci_bar_access(struct acrn_vm *service_vm, const struct pci_pdev *pdev)
{
uint32_t idx, mask;
struct pci_vbar vbar = {};
uint64_t base = 0UL, size = 0UL;
uint64_t *pml4_page;
pml4_page = (uint64_t *)sos->arch_vm.nworld_eptp;
pml4_page = (uint64_t *)service_vm->arch_vm.nworld_eptp;
for ( idx= 0; idx < pdev->nr_bars; idx++) {
vbar.bar_type.bits = pdev->bars[idx].phy_bar;
@@ -347,31 +347,31 @@ static void deny_pci_bar_access(struct acrn_vm *sos, const struct pci_pdev *pdev
if ((base != 0UL)) {
if (is_pci_io_bar(&vbar)) {
base &= 0xffffU;
deny_guest_pio_access(sos, base, size);
deny_guest_pio_access(service_vm, base, size);
} else {
/*for passthru device MMIO BAR base must be 4K aligned. This is the requirement of passthru devices.*/
ASSERT((base & PAGE_MASK) != 0U, "%02x:%02x.%d bar[%d] 0x%lx, is not 4K aligned!",
pdev->bdf.bits.b, pdev->bdf.bits.d, pdev->bdf.bits.f, idx, base);
size = round_page_up(size);
ept_del_mr(sos, pml4_page, base, size);
ept_del_mr(service_vm, pml4_page, base, size);
}
}
}
}
}
static void deny_pdevs(struct acrn_vm *sos, struct acrn_vm_pci_dev_config *pci_devs, uint16_t pci_dev_num)
static void deny_pdevs(struct acrn_vm *service_vm, struct acrn_vm_pci_dev_config *pci_devs, uint16_t pci_dev_num)
{
uint16_t i;
for (i = 0; i < pci_dev_num; i++) {
if ( pci_devs[i].pdev != NULL) {
deny_pci_bar_access(sos, pci_devs[i].pdev);
deny_pci_bar_access(service_vm, pci_devs[i].pdev);
}
}
}
static void deny_hv_owned_devices(struct acrn_vm *sos)
static void deny_hv_owned_devices(struct acrn_vm *service_vm)
{
uint16_t pio_address;
uint32_t nbytes, i;
@@ -379,11 +379,11 @@ static void deny_hv_owned_devices(struct acrn_vm *sos)
const struct pci_pdev **hv_owned = get_hv_owned_pdevs();
for (i = 0U; i < get_hv_owned_pdev_num(); i++) {
deny_pci_bar_access(sos, hv_owned[i]);
deny_pci_bar_access(service_vm, hv_owned[i]);
}
if (get_pio_dbg_uart_cfg(&pio_address, &nbytes)) {
deny_guest_pio_access(sos, pio_address, nbytes);
deny_guest_pio_access(service_vm, pio_address, nbytes);
}
}
@@ -395,12 +395,12 @@ static void deny_hv_owned_devices(struct acrn_vm *sos)
* @pre vm != NULL
* @pre is_service_vm(vm) == true
*/
static void prepare_sos_vm_memmap(struct acrn_vm *vm)
static void prepare_service_vm_memmap(struct acrn_vm *vm)
{
uint16_t vm_id;
uint32_t i;
uint64_t hv_hpa;
uint64_t sos_high64_max_ram = MEM_4G;
uint64_t service_vm_high64_max_ram = MEM_4G;
struct acrn_vm_config *vm_config;
uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;
struct epc_section* epc_secs;
@@ -410,18 +410,18 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
const struct e820_entry *p_e820 = vm->e820_entries;
struct pci_mmcfg_region *pci_mmcfg;
pr_dbg("SOS_VM e820 layout:\n");
pr_dbg("Service VM e820 layout:\n");
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
pr_dbg("e820 table: %d type: 0x%x", i, entry->type);
pr_dbg("BaseAddress: 0x%016lx length: 0x%016lx\n", entry->baseaddr, entry->length);
if (entry->type == E820_TYPE_RAM) {
sos_high64_max_ram = max((entry->baseaddr + entry->length), sos_high64_max_ram);
service_vm_high64_max_ram = max((entry->baseaddr + entry->length), service_vm_high64_max_ram);
}
}
/* create real ept map for [0, sos_high64_max_ram) with UC */
ept_add_mr(vm, pml4_page, 0UL, 0UL, sos_high64_max_ram, EPT_RWX | EPT_UNCACHED);
/* create real ept map for [0, service_vm_high64_max_ram) with UC */
ept_add_mr(vm, pml4_page, 0UL, 0UL, service_vm_high64_max_ram, EPT_RWX | EPT_UNCACHED);
/* update ram entries to WB attr */
for (i = 0U; i < entries_count; i++) {
@@ -431,9 +431,9 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
}
}
/* Unmap all platform EPC resource from SOS.
/* Unmap all platform EPC resource from Service VM.
* This part has already been marked as reserved by BIOS in E820
* will cause EPT violation if sos accesses EPC resource.
* will cause EPT violation if Service VM accesses EPC resource.
*/
epc_secs = get_phys_epc();
for (i = 0U; (i < MAX_EPC_SECTIONS) && (epc_secs[i].size != 0UL); i++) {
@@ -441,7 +441,7 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
}
/* unmap hypervisor itself for safety
* will cause EPT violation if sos accesses hv memory
* will cause EPT violation if Service VM accesses hv memory
*/
hv_hpa = hva2hpa((void *)(get_hv_image_base()));
ept_del_mr(vm, pml4_page, hv_hpa, get_hv_ram_size());
@@ -549,8 +549,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
if (is_service_vm(vm)) {
/* Only for Service VM */
create_sos_vm_e820(vm);
prepare_sos_vm_memmap(vm);
create_service_vm_e820(vm);
prepare_service_vm_memmap(vm);
status = init_vm_boot_info(vm);
} else {
@@ -559,11 +559,11 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
vm->sworld_control.flag.supported = 1U;
}
if (vm->sworld_control.flag.supported != 0UL) {
uint16_t sos_vm_id = (get_sos_vm())->vm_id;
uint16_t page_idx = vmid_2_rel_vmid(sos_vm_id, vm_id) - 1U;
uint16_t service_vm_id = (get_service_vm())->vm_id;
uint16_t page_idx = vmid_2_rel_vmid(service_vm_id, vm_id) - 1U;
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
hva2hpa(post_uos_sworld_memory[page_idx]),
hva2hpa(post_user_vm_sworld_memory[page_idx]),
TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
}
if (vm_config->name[0] == '\0') {
@@ -654,7 +654,7 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
if (status == 0) {
/* We have assumptions:
* 1) vcpus used by SOS has been offlined by DM before UOS re-use it.
* 1) vcpus used by Service VM has been offlined by DM before User VM re-use it.
* 2) pcpu_bitmap passed sanitization is OK for vcpu creating.
*/
vm->hw.cpu_affinity = pcpu_bitmap;
@@ -907,7 +907,7 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
reset_vcpu(bsp, POWER_ON_RESET);
/* When SOS resume from S3, it will return to real mode
/* When Service VM resume from S3, it will return to real mode
* with entry set to wakeup_vec.
*/
set_vcpu_startup_entry(bsp, wakeup_vec);
@@ -930,7 +930,7 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
#ifdef CONFIG_SECURITY_VM_FIXUP
security_vm_fixup(vm_id);
#endif
/* SOS and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
/* Service VM and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
err = create_vm(vm_id, vm_config->cpu_affinity, vm_config, &vm);
if (err == 0) {
@@ -940,8 +940,8 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
if (is_service_vm(vm)) {
/* We need to ensure all modules of pre-launched VMs have been loaded already
* before loading SOS VM modules, otherwise the module of pre-launched VMs could
* be corrupted because SOS VM kernel might pick any usable RAM to extract kernel
* before loading Service VM modules, otherwise the module of pre-launched VMs could
* be corrupted because Service VM kernel might pick any usable RAM to extract kernel
* when KASLR enabled.
* In case the pre-launched VMs aren't loaded successfuly that cause deadlock here,
* use a 10000ms timer to break the waiting loop.
@@ -990,7 +990,7 @@ void launch_vms(uint16_t pcpu_id)
if ((vm_config->load_order == SOS_VM) || (vm_config->load_order == PRE_LAUNCHED_VM)) {
if (pcpu_id == get_configured_bsp_pcpu_id(vm_config)) {
if (vm_config->load_order == SOS_VM) {
sos_vm_ptr = &vm_array[vm_id];
service_vm_ptr = &vm_array[vm_id];
}
prepare_vm(vm_id, vm_config);
}

View File

@@ -28,7 +28,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
io_req->reqs.pio_request.size = 2UL;
io_req->reqs.pio_request.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));
/* Inject pm1a S5 request to SOS to shut down the guest */
/* Inject pm1a S5 request to Service VM to shut down the guest */
(void)emulate_io(vcpu, io_req);
} else {
if (is_service_vm(vm)) {
@@ -47,7 +47,7 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
}
}
/* Either SOS or pre-launched VMs */
/* Either Service VM or pre-launched VMs */
get_vm_lock(vm);
poweroff_if_rt_vm(vm);
pause_vm(vm);
@@ -101,7 +101,7 @@ static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset)
ret = false;
} else {
/*
* If it's SOS reset while RTVM is still alive
* If it's Service VM reset while RTVM is still alive
* or pre-launched VM reset,
* ACRN doesn't support re-launch, just shutdown the guest.
*/
@@ -116,7 +116,7 @@ static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset)
ret = false;
}
/*
* Ignore writes from SOS and pre-launched VM.
* Ignore writes from Service VM and pre-launched VM.
* Equivalent to hiding this port from the guest.
*/
}
@@ -198,7 +198,7 @@ static bool handle_reset_reg_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
*/
void register_reset_port_handler(struct acrn_vm *vm)
{
/* Don't support SOS and pre-launched VM re-launch for now. */
/* Don't support Service VM and pre-launched VM re-launch for now. */
if (!is_postlaunched_vm(vm) || is_rt_vm(vm)) {
struct acpi_reset_reg *reset_reg = get_host_reset_reg_data();
struct acrn_acpi_generic_address *gas = &(reset_reg->reg);
@@ -215,7 +215,7 @@ void register_reset_port_handler(struct acrn_vm *vm)
register_pio_emulation_handler(vm, CF9_PIO_IDX, &io_range, handle_reset_reg_read, handle_cf9_write);
/*
* - here is taking care of SOS only:
* - here is taking care of Service VM only:
* Don't support MMIO or PCI based reset register for now.
* ACPI Spec: Register_Bit_Width must be 8 and Register_Bit_Offset must be 0.
*/

View File

@@ -20,7 +20,8 @@ struct hc_dispatch {
/* The permission_flags is a bitmap of guest flags indicating whether a VM can invoke this hypercall:
*
* - If permission_flags == 0UL (which is the default value), this hypercall can only be invoked by the SOS.
* - If permission_flags == 0UL (which is the default value), this hypercall can only be invoked by the
* Service VM.
* - Otherwise, this hypercall can only be invoked by a VM whose guest flags have ALL set bits in
* permission_flags.
*/
@@ -31,8 +32,8 @@ struct hc_dispatch {
static const struct hc_dispatch hc_dispatch_table[] = {
[HC_IDX(HC_GET_API_VERSION)] = {
.handler = hcall_get_api_version},
[HC_IDX(HC_SOS_OFFLINE_CPU)] = {
.handler = hcall_sos_offline_cpu},
[HC_IDX(HC_SERVICE_VM_OFFLINE_CPU)] = {
.handler = hcall_service_vm_offline_cpu},
[HC_IDX(HC_SET_CALLBACK_VECTOR)] = {
.handler = hcall_set_callback_vector},
[HC_IDX(HC_GET_PLATFORM_INFO)] = {
@@ -106,7 +107,7 @@ static const struct hc_dispatch hc_dispatch_table[] = {
#define GUEST_FLAGS_ALLOWING_HYPERCALLS GUEST_FLAG_SECURE_WORLD_ENABLED
struct acrn_vm *parse_target_vm(struct acrn_vm *sos_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2)
struct acrn_vm *parse_target_vm(struct acrn_vm *service_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2)
{
struct acrn_vm *target_vm = NULL;
uint16_t vm_id = ACRN_INVALID_VMID;
@@ -116,34 +117,34 @@ struct acrn_vm *parse_target_vm(struct acrn_vm *sos_vm, uint64_t hcall_id, uint6
switch (hcall_id) {
case HC_CREATE_VM:
if (copy_from_gpa(sos_vm, &cv, param1, sizeof(cv)) == 0) {
if (copy_from_gpa(service_vm, &cv, param1, sizeof(cv)) == 0) {
vm_id = get_vmid_by_uuid(&cv.uuid[0]);
}
break;
case HC_PM_GET_CPU_STATE:
vm_id = rel_vmid_2_vmid(sos_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT));
vm_id = rel_vmid_2_vmid(service_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT));
break;
case HC_VM_SET_MEMORY_REGIONS:
if (copy_from_gpa(sos_vm, &regions, param1, sizeof(regions)) == 0) {
if (copy_from_gpa(service_vm, &regions, param1, sizeof(regions)) == 0) {
/* the vmid in regions is a relative vm id, need to convert to absolute vm id */
vm_id = rel_vmid_2_vmid(sos_vm->vm_id, regions.vmid);
vm_id = rel_vmid_2_vmid(service_vm->vm_id, regions.vmid);
}
break;
case HC_GET_API_VERSION:
case HC_SOS_OFFLINE_CPU:
case HC_SERVICE_VM_OFFLINE_CPU:
case HC_SET_CALLBACK_VECTOR:
case HC_GET_PLATFORM_INFO:
case HC_SETUP_SBUF:
case HC_SETUP_HV_NPK_LOG:
case HC_PROFILING_OPS:
case HC_GET_HW_INFO:
target_vm = sos_vm;
target_vm = service_vm;
break;
default:
relative_vm_id = (uint16_t)param1;
vm_id = rel_vmid_2_vmid(sos_vm->vm_id, relative_vm_id);
vm_id = rel_vmid_2_vmid(service_vm->vm_id, relative_vm_id);
break;
}
@@ -173,7 +174,7 @@ static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI); /* hypercall param2 from guest */
if ((permission_flags == 0UL) && is_service_vm(vm)) {
/* A permission_flags of 0 indicates that this hypercall is for SOS to manage
/* A permission_flags of 0 indicates that this hypercall is for Service VM to manage
* post-launched VMs.
*/
struct acrn_vm *target_vm = parse_target_vm(vm, hcall_id, param1, param2);
@@ -198,7 +199,7 @@ static int32_t dispatch_hypercall(struct acrn_vcpu *vcpu)
}
/*
* Pass return value to SOS by register rax.
* Pass return value to Service VM by register rax.
* This function should always return 0 since we shouldn't
* deal with hypercall error in hypervisor.
*/
@@ -213,7 +214,7 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
/*
* The following permission checks are applied to hypercalls.
*
* 1. Only SOS and VMs with specific guest flags (referred to as 'allowed VMs' hereinafter) can invoke
* 1. Only Service VM and VMs with specific guest flags (referred to as 'allowed VMs' hereinafter) can invoke
* hypercalls by executing the `vmcall` instruction. Attempts to execute the `vmcall` instruction in the
* other VMs will trigger #UD.
* 2. Attempts to execute the `vmcall` instruction from ring 1, 2 or 3 in an allowed VM will trigger #GP(0).

View File

@@ -976,7 +976,7 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
}
case MSR_IA32_BIOS_UPDT_TRIG:
{
/* We only allow SOS to do uCode update */
/* We only allow Service VM to do uCode update */
if (is_service_vm(vcpu->vm)) {
acrn_update_ucode(vcpu, v);
}

View File

@@ -115,7 +115,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
if (cap.bits.fix != 0U) {
/*
* The system firmware runs in VMX non-root mode on SOS_VM.
* The system firmware runs in VMX non-root mode on Service VM.
* In some cases, the firmware needs particular mem type
* at certain mmeory locations (e.g. UC for some
* hardware registers), so we need to configure EPT
@@ -124,7 +124,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
vmtrr->fixed_range[i].value = msr_read(fixed_mtrr_map[i].msr);
} else {
/*
* For non-sos_vm EPT, all memory is setup with WB type in
* For non-Service VM EPT, all memory is setup with WB type in
* EPT, so we setup fixed range MTRRs accordingly.
*/
vmtrr->fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;

View File

@@ -21,14 +21,14 @@
void arch_fire_hsm_interrupt(void)
{
/*
* use vLAPIC to inject vector to SOS vcpu 0 if vlapic is enabled
* use vLAPIC to inject vector to Service VM vcpu 0 if vlapic is enabled
* otherwise, send IPI hardcoded to BSP_CPU_ID
*/
struct acrn_vm *sos_vm;
struct acrn_vm *service_vm;
struct acrn_vcpu *vcpu;
sos_vm = get_sos_vm();
vcpu = vcpu_from_vid(sos_vm, BSP_CPU_ID);
service_vm = get_service_vm();
vcpu = vcpu_from_vid(service_vm, BSP_CPU_ID);
vlapic_set_intr(vcpu, get_hsm_notification_vector(), LAPIC_TRIG_EDGE);
}

View File

@@ -89,7 +89,7 @@ static int32_t request_notification_irq(irq_action_t func, void *data)
*/
void setup_notification(void)
{
/* support IPI notification, SOS_VM will register all CPU */
/* support IPI notification, Service VM will register all CPU */
if (request_notification_irq(kick_notification, NULL) < 0) {
pr_err("Failed to setup notification");
}

View File

@@ -98,15 +98,15 @@ void fill_seed_arg(char *cmd_dst, size_t cmd_sz)
for (i = 0U; seed_arg[i].str != NULL; i++) {
if (seed_arg[i].addr != 0UL) {
snprintf(cmd_dst, cmd_sz, "%s0x%X ", seed_arg[i].str, sos_vm_hpa2gpa(seed_arg[i].addr));
snprintf(cmd_dst, cmd_sz, "%s0x%X ", seed_arg[i].str, service_vm_hpa2gpa(seed_arg[i].addr));
if (seed_arg[i].bootloader_id == BOOTLOADER_SBL) {
struct image_boot_params *boot_params =
(struct image_boot_params *)hpa2hva(seed_arg[i].addr);
boot_params->p_seed_list = sos_vm_hpa2gpa(boot_params->p_seed_list);
boot_params->p_seed_list = service_vm_hpa2gpa(boot_params->p_seed_list);
boot_params->p_platform_info = sos_vm_hpa2gpa(boot_params->p_platform_info);
boot_params->p_platform_info = service_vm_hpa2gpa(boot_params->p_platform_info);
}
break;

View File

@@ -68,7 +68,7 @@ bool parse_seed_abl(uint64_t addr, struct physical_seed *phy_seed)
/*
* Copy out abl_seed for trusty and clear the original seed in memory.
* The SOS requires the legacy seed to derive RPMB key. So skip the
* The Service VM requires the legacy seed to derive RPMB key. So skip the
* legacy seed when clear original seed.
*/
(void)memset((void *)&phy_seed->seed_list[0U], 0U, sizeof(phy_seed->seed_list));