hv: riscv: add user memory access interface
Add the pre_user_access() and post_user_access() interfaces to protect MMIO from accidental hypervisor access. The SMAP extension still needs to be detected.

Tracked-On: #8831
Signed-off-by: hangliu1 <hang1.liu@intel.com>
Reviewed-by: Liu, Yifan1 <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
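The change wires one arch-neutral pair through every guest/user memory touch point. Below is a minimal sketch of the interface, assembled from the hunks that follow; the CONFIG_RISCV guard is illustrative only (the tree actually selects the arch implementation through per-arch headers, not this macro):

#define SSTATUS_SUM	0x00040000UL	/* sstatus.SUM: supervisor may access user pages */

#ifdef CONFIG_RISCV	/* illustrative guard, not the real config symbol */
static inline void arch_pre_user_access(void)
{
	/* Set SUM so hypervisor loads/stores to user-mapped guest memory succeed */
	asm volatile ("csrs sstatus, %0" : : "r" (SSTATUS_SUM) : "memory");
}

static inline void arch_post_user_access(void)
{
	/* Clear SUM to re-arm the SMAP-style protection */
	asm volatile ("csrc sstatus, %0" : : "r" (SSTATUS_SUM) : "memory");
}
#else	/* x86: the former stac()/clac() bodies, renamed */
static inline void arch_pre_user_access(void)
{
	asm volatile ("stac" : : : "memory");
}

static inline void arch_post_user_access(void)
{
	asm volatile ("clac" : : : "memory");
}
#endif

/* Arch-neutral wrappers called at every guest-memory access site */
static inline void pre_user_access(void)
{
	arch_pre_user_access();
}

static inline void post_user_access(void)
{
	arch_post_user_access();
}

On x86 the semantics are unchanged (stac/clac); RISC-V gains an equivalent via the SUM bit, pending runtime detection of the smepmp capability from the DTS, as the TODO in the first hunk notes.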
@@ -116,6 +116,10 @@ static void init_hv_mapping(void)
 {
 	ppt_mmu_top_addr = (uint64_t *)alloc_page(&ppt_page_pool);
 
+	/* TODO: The SUM bit in sstatus is 0, meaning SMAP is enabled;
+	 * however, SMAP is provided by the smepmp extension, so we need to
+	 * detect its existence from the DTS.
+	 */
 	pgtable_add_map((uint64_t *)ppt_mmu_top_addr, get_board_hv_device_start(),
 			get_board_hv_device_start(), get_board_hv_device_size(),
 			PAGE_V | PAGE_R | PAGE_W,
@@ -338,9 +338,9 @@ static uint16_t get_pcpu_id_from_lapic_id(uint32_t lapic_id)
 void arch_start_pcpu(uint16_t pcpu_id)
 {
 	/* Update the stack for pcpu */
-	stac();
+	pre_user_access();
 	write_trampoline_stack_sym(pcpu_id);
-	clac();
+	post_user_access();
 
 	/* Using the MFENCE to make sure trampoline code
 	 * has been updated (clflush) into memory beforing start APs.
@@ -433,9 +433,9 @@ void arch_cpu_dead(void)
 	/* clean up native stuff */
 	vmx_off();
 
-	stac();
+	pre_user_access();
 	flush_cache_range((void *)get_hv_image_base(), get_hv_image_size());
-	clac();
+	post_user_access();
 
 	/* Set state to show CPU is dead */
 	pcpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
@@ -421,9 +421,9 @@ void ept_flush_leaf_page(uint64_t *pge, uint64_t size)
 			 * For end_hpa > sw_sram_top, flush [base_hpa, sw_sram_bottom) first,
 			 * flush [sw_sram_top, end_hpa) in the next if condition
 			 */
-			stac();
+			pre_user_access();
 			flush_cache_range(hpa2hva(base_hpa), min(end_hpa, sw_sram_bottom) - base_hpa);
-			clac();
+			post_user_access();
 		}
 
 		if (end_hpa > sw_sram_top) {
@@ -433,9 +433,9 @@ void ept_flush_leaf_page(uint64_t *pge, uint64_t size)
 			 * For base_hpa < sw_sram_bottom, flush [sw_sram_top, end_hpa) here,
 			 * flush [base_hpa, sw_sram_bottom) in the below if condition
 			 */
-			stac();
+			pre_user_access();
 			flush_cache_range(hpa2hva(max(base_hpa, sw_sram_top)), end_hpa - max(base_hpa, sw_sram_top));
-			clac();
+			post_user_access();
 		}
 	}
 }
@@ -72,7 +72,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
 	} else {
 		addr = pw_info->top_entry;
 		i = pw_info->level;
-		stac();
+		pre_user_access();
 
 		while ((i != 0U) && (fault == 0)) {
 			i--;
@@ -159,7 +159,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
 			*gpa = entry | (gva & (page_size - 1UL));
 		}
 
-		clac();
+		post_user_access();
 		if (fault != 0) {
 			ret = -EFAULT;
 			*err_code |= PAGE_FAULT_P_FLAG;
@@ -181,9 +181,9 @@ static int32_t local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *
 	base = (uint64_t *)gpa2hva(vcpu->vm, addr);
 	if (base != NULL) {
 		index = (uint32_t)gva >> 30U;
-		stac();
+		pre_user_access();
 		entry = base[index];
-		clac();
+		post_user_access();
 
 		if ((entry & PAGE_PRESENT) != 0U) {
 			pw_info->level = 2U;
@@ -294,13 +294,13 @@ static inline uint32_t local_copy_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t
 
 		g_ptr = hpa2hva(hpa);
 
-		stac();
+		pre_user_access();
 		if (cp_from_vm) {
 			(void)memcpy_s(h_ptr, len, g_ptr, len);
 		} else {
 			(void)memcpy_s(g_ptr, len, h_ptr, len);
 		}
-		clac();
+		post_user_access();
 	}
 
 	return len;
@@ -73,7 +73,7 @@ hyperv_setup_tsc_page(const struct acrn_vcpu *vcpu, uint64_t val)
 	if (ref_tsc_page->enabled == 1U) {
 		p = (struct HV_REFERENCE_TSC_PAGE *)gpa2hva(vcpu->vm, ref_tsc_page->gpfn << PAGE_SHIFT);
 		if (p != NULL) {
-			stac();
+			pre_user_access();
 			p->tsc_scale = vcpu->vm->arch_vm.hyperv.tsc_scale;
 			p->tsc_offset = vcpu->vm->arch_vm.hyperv.tsc_offset;
 			cpu_write_memory_barrier();
@@ -82,7 +82,7 @@ hyperv_setup_tsc_page(const struct acrn_vcpu *vcpu, uint64_t val)
 				tsc_seq = 1U;
 			}
 			p->tsc_sequence = tsc_seq;
-			clac();
+			post_user_access();
 		}
 	}
 }
@@ -134,14 +134,14 @@ hyperv_setup_hypercall_page(const struct acrn_vcpu *vcpu, uint64_t val)
 		page_gpa = hypercall.gpfn << PAGE_SHIFT;
 		page_hva = gpa2hva(vcpu->vm, page_gpa);
 		if (page_hva != NULL) {
-			stac();
+			pre_user_access();
 			(void)memset(page_hva, 0U, PAGE_SIZE);
 			if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 				(void)memcpy_s(page_hva, 8U, inst64, 8U);
 			} else {
 				(void)memcpy_s(page_hva, 11U, inst32, 11U);
 			}
-			clac();
+			post_user_access();
 		}
 	}
 }
@@ -179,9 +179,9 @@ static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
 	/* Save the wakeup vec set by guest OS. Will return to guest
 	 * with this wakeup vec as entry.
 	 */
-	stac();
+	pre_user_access();
 	guest_wakeup_vec32 = *(vm->pm.sx_state_data->wake_vector_32);
-	clac();
+	post_user_access();
 
 	pause_vm(vm); /* pause Service VM before suspend system */
 	host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
@@ -85,9 +85,9 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 	if (vm->arch_vm.sworld_eptp != NULL) {
 		if (need_clr_mem) {
 			/* clear trusty memory space */
-			stac();
+			pre_user_access();
 			(void)memset(hpa2hva(hpa), 0U, (size_t)size);
-			clac();
+			post_user_access();
 		}
 
 		ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_user_vm, size);
@@ -295,13 +295,13 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu, uint32_t mem_size, uint64_
 			vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rdi
 				= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);
 
-			stac();
+			pre_user_access();
 			mem = (struct trusty_mem *)(hpa2hva(mem_base_hpa));
 			(void)memcpy_s((void *)&mem->first_page.key_info, sizeof(struct trusty_key_info),
 					&key_info, sizeof(key_info));
 			(void)memcpy_s((void *)&mem->first_page.startup_param, sizeof(struct trusty_startup_param),
 					&startup_param, sizeof(startup_param));
-			clac();
+			post_user_access();
 			success = true;
 		}
 	}
@@ -405,7 +405,7 @@ bool handle_l2_ept_violation(struct acrn_vcpu *vcpu)
 	ASSERT(desc != NULL, "Invalid shadow EPTP!");
 
 	spinlock_obtain(&vept_desc_bucket_lock);
-	stac();
+	pre_user_access();
 
 	p_shadow_ept_page = (uint64_t *)(desc->shadow_eptp & PAGE_MASK);
 	p_guest_ept_page = gpa2hva(vcpu->vm, desc->guest_eptp & PAGE_MASK);
@@ -479,7 +479,7 @@ bool handle_l2_ept_violation(struct acrn_vcpu *vcpu)
 		}
 	}
 
-	clac();
+	post_user_access();
 	spinlock_release(&vept_desc_bucket_lock);
 
 	return is_l1_vmexit;
@@ -180,12 +180,12 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 {
 	uint64_t pmain_entry_saved;
 
-	stac();
+	pre_user_access();
 
 	/* set ACRN wakeup vec instead */
 	*(sstate_data->wake_vector_32) = (uint32_t)get_trampoline_start16_paddr();
 
-	clac();
+	post_user_access();
 
 	/* Save TSC on all PCPU */
 	smp_call_function(get_active_pcpu_bitmap(), suspend_tsc, NULL);
@@ -193,7 +193,7 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	/* offline all APs */
 	stop_pcpus();
 
-	stac();
+	pre_user_access();
 	/* Save default main entry and we will restore it after
 	 * back from S3. So the AP online could jmp to correct
 	 * main entry.
@@ -202,7 +202,7 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 
 	/* Set the main entry for resume from S3 state */
 	write_trampoline_sym(main_entry, (uint64_t)restore_s3_context);
-	clac();
+	post_user_access();
 
 	local_irq_disable();
 	vmx_off();
@@ -225,9 +225,9 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	local_irq_enable();
 
 	/* restore the default main entry */
-	stac();
+	pre_user_access();
 	write_trampoline_sym(main_entry, pmain_entry_saved);
-	clac();
+	post_user_access();
 
 	/* online all APs again */
 	if (!start_pcpus(AP_MASK)) {
@@ -57,11 +57,11 @@ static void *get_initrd_load_addr(struct acrn_vm *vm, uint64_t kernel_start)
 	 * Per kernel src head_64.S, decompressed kernel start at 2M aligned to the
 	 * compressed kernel load address.
 	 */
-	stac();
+	pre_user_access();
 	kernel_init_size = zeropage->hdr.init_size;
 	kernel_align = zeropage->hdr.kernel_alignment;
 	initrd_addr_max = zeropage->hdr.initrd_addr_max;
-	clac();
+	post_user_access();
 	kernel_end = roundup(kernel_start, kernel_align) + kernel_init_size;
 
 	if (initrd_addr_max != 0U) {
@@ -137,7 +137,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
 	 */
 	zeropage = (struct zero_page *)sw_info->kernel_info.kernel_src_addr;
 
-	stac();
+	pre_user_access();
 	if ((is_service_vm(vm)) && (zeropage->hdr.relocatable_kernel != 0U)) {
 		uint64_t mods_start, mods_end;
 		uint64_t kernel_load_gpa = INVALID_GPA;
@@ -172,7 +172,7 @@ static void *get_bzimage_kernel_load_addr(struct acrn_vm *vm)
 			pr_err("Non-relocatable kernel found, risk to boot!");
 		}
 	}
-	clac();
+	post_user_access();
 
 	if (load_addr == NULL) {
 		pr_err("Could not get kernel load addr of VM %d .", vm->vm_id);
@@ -272,7 +272,7 @@ static uint64_t create_zero_page(struct acrn_vm *vm, uint64_t load_params_gpa)
 	hva = (struct zero_page *)gpa2hva(vm, gpa);
 	zeropage = hva;
 
-	stac();
+	pre_user_access();
 	/* clear the zeropage */
 	(void)memset(zeropage, 0U, MEM_4K);
 
@@ -321,7 +321,7 @@ static uint64_t create_zero_page(struct acrn_vm *vm, uint64_t load_params_gpa)
 
 	/* Create/add e820 table entries in zeropage */
 	zeropage->e820_nentries = (uint8_t)create_zeropage_e820(zeropage, vm);
-	clac();
+	post_user_access();
 
 	/* Return Physical Base Address of zeropage */
 	return gpa;
@@ -347,9 +347,9 @@ static void load_bzimage(struct acrn_vm *vm, struct acrn_vcpu *vcpu,
 	 * The compressed proteced mode code start at offset (setup_sectors + 1U) * 512U of bzImage.
 	 * Only protected mode code need to be loaded.
 	 */
-	stac();
+	pre_user_access();
 	setup_sectors = (zeropage->hdr.setup_sects == 0U) ? 4U : zeropage->hdr.setup_sects;
-	clac();
+	post_user_access();
 	prot_code_offset = (uint32_t)(setup_sectors + 1U) * 512U;
 	prot_code_size = (sw_kernel->kernel_size > prot_code_offset) ?
 			(sw_kernel->kernel_size - prot_code_offset) : 0U;
@@ -76,7 +76,7 @@ uint32_t prepare_loader_name(struct acrn_vm *vm, uint64_t param_ldrname_gpa)
 
 /**
  * @pre vm != NULL
- * must run in stac/clac context
+ * must run in pre_user_access/post_user_access context
  */
 static void *do_load_elf64(struct acrn_vm *vm)
 {
@@ -109,9 +109,9 @@ static void *do_load_elf64(struct acrn_vm *vm)
 			 */
 			(void)copy_to_gpa(vm, p_elf_img + p_prg_tbl_head64->p_offset,
 					p_prg_tbl_head64->p_paddr, (uint32_t)p_prg_tbl_head64->p_filesz);
-			/* copy_to_gpa has its own stac/clac inside. Call stac again here to keep
+			/* copy_to_gpa has its own pre_user_access/post_user_access inside. Call pre_user_access again here to keep
 			 * the context. */
-			stac();
+			pre_user_access();
 		}
 		p_prg_tbl_head64++;
 	}
@@ -142,7 +142,7 @@ static void *do_load_elf64(struct acrn_vm *vm)
 
 /**
  * @pre vm != NULL
- * must run in stac/clac context
+ * must run in pre_user_access/post_user_access context
 */
 static void *do_load_elf32(struct acrn_vm *vm)
 {
@@ -175,9 +175,9 @@ static void *do_load_elf32(struct acrn_vm *vm)
 			 */
 			(void)copy_to_gpa(vm, p_elf_img + p_prg_tbl_head32->p_offset,
 					p_prg_tbl_head32->p_paddr, p_prg_tbl_head32->p_filesz);
-			/* copy_to_gpa has its own stac/clac inside. Call stac again here to keep
+			/* copy_to_gpa has its own pre_user_access/post_user_access inside. Call pre_user_access again here to keep
 			 * the context. */
-			stac();
+			pre_user_access();
 		}
 		p_prg_tbl_head32++;
 	}
@@ -216,7 +216,7 @@ static int32_t load_elf(struct acrn_vm *vm)
 	void *p_elf_img = (void *)sw_kernel->kernel_src_addr;
 	int32_t ret = 0;
 
-	stac();
+	pre_user_access();
 
 	if (*(uint32_t *)p_elf_img == ELFMAGIC) {
 		if (*(uint8_t *)(p_elf_img + EI_CLASS) == ELFCLASS64) {
@@ -230,7 +230,7 @@ static int32_t load_elf(struct acrn_vm *vm)
 		pr_err("%s, booting elf but no elf header found!", __func__);
 	}
 
-	clac();
+	post_user_access();
 
 	sw_kernel->kernel_entry_addr = elf_entry;
@@ -292,14 +292,14 @@ int32_t elf_loader(struct acrn_vm *vm)
 	/* We boot ELF Image from protected mode directly */
 	init_vcpu_protect_mode_regs(vcpu, load_params_gpa +
 			offsetof(struct elf_boot_para, init_gdt));
-	stac();
+	pre_user_access();
 	mb_hdr = find_img_multiboot_header(vm);
-	clac();
+	post_user_access();
 	if (mb_hdr != NULL) {
 		uint32_t mmap_length = 0U;
 		struct multiboot_info mb_info;
 
-		stac();
+		pre_user_access();
 		if ((mb_hdr->flags & MULTIBOOT_HEADER_NEED_MEMINFO) != 0U) {
 			mmap_length = prepare_multiboot_mmap(vm, load_params_gpa +
 					offsetof(struct elf_boot_para, mmap));
@@ -336,7 +336,7 @@ int32_t elf_loader(struct acrn_vm *vm)
 				offsetof(struct elf_boot_para, mb_info));
 			/* other vcpu regs should have satisfied multiboot requirement already. */
 		}
-		clac();
+		post_user_access();
 	}
 	/*
	 * elf_loader need support non-multiboot header image
@@ -220,9 +220,9 @@ int32_t init_vm_boot_info(struct acrn_vm *vm)
 	struct acrn_boot_info *abi = get_acrn_boot_info();
 	int32_t ret = -EINVAL;
 
-	stac();
+	pre_user_access();
 	ret = init_vm_sw_load(vm, abi);
-	clac();
+	post_user_access();
 
 	return ret;
 }
@@ -1206,7 +1206,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
 		hpa = gpa2hpa(vm, param2);
 		if (hpa != INVALID_HPA) {
 			intr_hdr = (struct acrn_intr_monitor *)hpa2hva(hpa);
-			stac();
+			pre_user_access();
 			if (intr_hdr->buf_cnt <= (MAX_PTDEV_NUM * 2U)) {
 				status = 0;
 
@@ -1228,7 +1228,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm,
 					break;
 				}
 			}
-			clac();
+			post_user_access();
 		}
 	}
@@ -55,7 +55,7 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len)
 	uint32_t ele_size, ret;
 	bool trigger_overwrite = false;
 
-	stac();
+	pre_user_access();
 	ele_size = sbuf->ele_size;
 	next_tail = sbuf_next_ptr(sbuf->tail, ele_size, sbuf->size);
 
@@ -84,7 +84,7 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len)
 		/* there must be something wrong */
 		ret = UINT32_MAX;
 	}
-	clac();
+	post_user_access();
 
 	return ret;
 }
@@ -16,7 +16,7 @@ int32_t init_vm_event(struct acrn_vm *vm, uint64_t *hva)
 	struct shared_buf *sbuf = (struct shared_buf *)hva;
 	int ret = -1;
 
-	stac();
+	pre_user_access();
 	if (sbuf != NULL) {
 		if (sbuf->magic == SBUF_MAGIC) {
 			vm->sw.vm_event_sbuf = sbuf;
@@ -24,7 +24,7 @@ int32_t init_vm_event(struct acrn_vm *vm, uint64_t *hva)
 			ret = 0;
 		}
 	}
-	clac();
+	post_user_access();
 
 	return ret;
 }
@@ -257,7 +257,7 @@ static int32_t profiling_sbuf_put_variable(struct shared_buf *sbuf,
 		return 0;
 	}
 
-	stac();
+	pre_user_access();
 	if (sbuf->tail >= sbuf->head) {
 		remaining_space = sbuf->size - (sbuf->tail - sbuf->head);
 	} else {
@@ -269,7 +269,7 @@ static int32_t profiling_sbuf_put_variable(struct shared_buf *sbuf,
 		 * Since if the next_tail equals head, then it is assumed
 		 * that buffer is empty, not full
 		 */
-		clac();
+		post_user_access();
 		return 0;
 	}
 
@@ -293,7 +293,7 @@ static int32_t profiling_sbuf_put_variable(struct shared_buf *sbuf,
 	}
 
 	sbuf->tail = next_tail;
-	clac();
+	post_user_access();
 
 	return (int32_t)size;
 }
@@ -330,14 +330,14 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
 		}
 
 		if (ss->pmu_state == PMU_RUNNING) {
-			stac();
+			pre_user_access();
 			if (sbuf->tail >= sbuf->head) {
 				remaining_space = sbuf->size
 						- (sbuf->tail - sbuf->head);
 			} else {
 				remaining_space = sbuf->head - sbuf->tail;
 			}
-			clac();
+			post_user_access();
 
 			/* populate the data header */
 			pkt_header.tsc = cpu_ticks();
@@ -397,14 +397,14 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
 
 		sw_lock = &(get_cpu_var(arch.profiling_info.sw_lock));
 		spinlock_irqsave_obtain(sw_lock, &rflags);
-		stac();
+		pre_user_access();
 		if (sbuf->tail >= sbuf->head) {
 			remaining_space
 				= sbuf->size - (sbuf->tail - sbuf->head);
 		} else {
 			remaining_space = sbuf->head - sbuf->tail;
 		}
-		clac();
+		post_user_access();
 
 		/* populate the data header */
 		pkt_header.tsc = cpu_ticks();
@@ -259,7 +259,7 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
 	req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);
 	cur = vcpu->vcpu_id;
 
-	stac();
+	pre_user_access();
 	acrn_io_req = &req_buf->req_slot[cur];
 	/* ACRN insert request to HSM and inject upcall */
 	acrn_io_req->type = io_req->io_type;
@@ -269,7 +269,7 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
 		acrn_io_req->completion_polling = 1U;
 		is_polling = true;
 	}
-	clac();
+	post_user_access();
 
 	/* Before updating the acrn_io_req state, enforce all fill acrn_io_req operations done */
 	cpu_write_memory_barrier();
@@ -316,10 +316,10 @@ uint32_t get_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id)
 	if (req_buf == NULL) {
 		state = 0xffffffffU;
 	} else {
-		stac();
+		pre_user_access();
 		acrn_io_req = &req_buf->req_slot[vcpu_id];
 		state = acrn_io_req->processed;
-		clac();
+		post_user_access();
 	}
 
 	return state;
@@ -332,7 +332,7 @@ void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
 
 	req_buf = (struct acrn_io_request_buffer *)vm->sw.io_shared_page;
 	if (req_buf != NULL) {
-		stac();
+		pre_user_access();
 		acrn_io_req = &req_buf->req_slot[vcpu_id];
 		/*
 		 * HV will only set processed to ACRN_IOREQ_STATE_PENDING or ACRN_IOREQ_STATE_FREE.
@@ -341,7 +341,7 @@ void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
 		 * It won't lead wrong processing.
 		 */
 		acrn_io_req->processed = state;
-		clac();
+		post_user_access();
 	}
 }
@@ -350,7 +350,7 @@ int init_asyncio(struct acrn_vm *vm, uint64_t *hva)
 	struct shared_buf *sbuf = (struct shared_buf *)hva;
 	int ret = -1;
 
-	stac();
+	pre_user_access();
 	if (sbuf != NULL) {
 		if (sbuf->magic == SBUF_MAGIC) {
 			vm->sw.asyncio_sbuf = sbuf;
@@ -359,7 +359,7 @@ int init_asyncio(struct acrn_vm *vm, uint64_t *hva)
 			ret = 0;
 		}
 	}
-	clac();
+	post_user_access();
 
 	return ret;
 }
@@ -403,7 +403,7 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 
 	req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);
 
-	stac();
+	pre_user_access();
 	acrn_io_req = &req_buf->req_slot[vcpu->vcpu_id];
 	if (io_req != NULL) {
 		switch (vcpu->req.io_type) {
@@ -426,7 +426,7 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	 * Only HV will set processed to ACRN_IOREQ_STATE_FREE when ioreq is done.
 	 */
 	acrn_io_req->processed = ACRN_IOREQ_STATE_FREE;
-	clac();
+	post_user_access();
 }
 
 /**
@@ -91,10 +91,10 @@ static void mask_one_msix_vector(const struct pci_vdev *vdev, uint32_t index)
 	uint32_t vector_control;
 	struct msix_table_entry *pentry = get_msix_table_entry(vdev, index);
 
-	stac();
+	pre_user_access();
 	vector_control = pentry->vector_control | PCIM_MSIX_VCTRL_MASK;
 	mmio_write32(vector_control, (void *)&(pentry->vector_control));
-	clac();
+	post_user_access();
 }
 
@@ -127,13 +127,13 @@ static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
 		 * fields with a single QWORD write, but some hardware can accept 32 bits
 		 * write only
 		 */
-		stac();
+		pre_user_access();
 		mmio_write32((uint32_t)(info.addr.full), (void *)&(pentry->addr));
 		mmio_write32((uint32_t)(info.addr.full >> 32U), (void *)((char *)&(pentry->addr) + 4U));
 
 		mmio_write32(info.data.full, (void *)&(pentry->data));
 		mmio_write32(vdev->msix.table_entries[index].vector_control, (void *)&(pentry->vector_control));
-		clac();
+		post_user_access();
 	}
 }
@@ -127,13 +127,13 @@ uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
 	} else {
 		if (vdev->pdev != NULL) {
 			hva = hpa2hva(vdev->msix.mmio_hpa + (mmio->address - vdev->msix.mmio_gpa));
-			stac();
+			pre_user_access();
 			if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 				mmio->value = mmio_read(hva, mmio->size);
 			} else {
 				mmio_write(hva, mmio->size, mmio->value);
 			}
-			clac();
+			post_user_access();
 		} else {
 			if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 				mmio->value = 0UL;
@@ -104,6 +104,7 @@ struct stack_frame {
 #define BITS_PER_LONG	(BYTES_PER_LONG << 3)
 /* Define the interrupt enable bit mask */
 #define SSTATUS_SIE	0x2
+#define SSTATUS_SUM	0x00040000UL
 
 /* Define CPU stack alignment */
 #define CPU_STACK_ALIGN	16UL
@@ -185,6 +186,16 @@ static inline void arch_local_irq_restore(uint64_t flags)
 	asm volatile("csrs sstatus, %0 \n" ::"rK"(flags & SSTATUS_SIE) : "memory");
 }
 
+static inline void arch_pre_user_access(void)
+{
+	asm volatile ("csrs sstatus, %0" : : "r" (SSTATUS_SUM) : "memory");
+}
+
+static inline void arch_post_user_access(void)
+{
+	asm volatile ("csrc sstatus, %0" : : "r" (SSTATUS_SUM) : "memory");
+}
+
 void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
 void init_percpu_hart_id(uint32_t bsp_hart_id);
 uint16_t get_pcpu_id_from_hart_id(uint32_t hart_id);
@@ -748,27 +748,12 @@ static inline void write_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint6
 	CPU_XMM_WRITE(xmm2, xmm2_addr);
 }
 
-/*
- * stac/clac pair is used to access guest's memory protected by SMAP,
- * following below flow:
- *
- *	stac();
- *	#access guest's memory.
- *	clac();
- *
- * Notes:Avoid inserting another stac/clac pair between stac and clac,
- * As once clac after multiple stac will invalidate SMAP protection
- * and hence Page Fault crash.
- * Logging message to memory buffer will induce this case,
- * please disable SMAP temporlly or don't log messages to shared
- * memory buffer, if it is evitable for you for debug purpose.
- */
-static inline void stac(void)
+static inline void arch_pre_user_access(void)
 {
 	asm volatile ("stac" : : : "memory");
 }
 
-static inline void clac(void)
+static inline void arch_post_user_access(void)
 {
 	asm volatile ("clac" : : : "memory");
 }
@@ -52,6 +52,8 @@ bool start_pcpus(uint64_t mask);
 void arch_cpu_dead(void);
 void cpu_dead(void);
 void arch_cpu_do_idle(void);
+static inline void arch_pre_user_access(void);
+static inline void arch_post_user_access(void);
 
 #define ALL_CPUS_MASK	((1UL << get_pcpu_nums()) - 1UL)
 #define AP_MASK		(ALL_CPUS_MASK & ~(1UL << BSP_CPU_ID))
@@ -82,6 +84,31 @@ static inline void arch_local_irq_disable(void);
 static inline void arch_local_irq_save(uint64_t *flags_ptr);
 static inline void arch_local_irq_restore(uint64_t flags);
 
+/*
+ * The pre_user_access/post_user_access pair is used to access guest memory
+ * protected by SMAP, following the flow below:
+ *
+ *	pre_user_access();
+ *	# access guest memory
+ *	post_user_access();
+ *
+ * Note: avoid nesting another pre_user_access/post_user_access pair inside
+ * an open pair, as one post_user_access after multiple pre_user_access calls
+ * invalidates SMAP protection and hence causes a Page Fault crash.
+ * Logging messages to a memory buffer will induce this case; please disable
+ * SMAP temporarily or don't log messages to the shared memory buffer if
+ * that is unavoidable for your debugging.
+ */
+static inline void pre_user_access(void)
+{
+	arch_pre_user_access();
+}
+
+static inline void post_user_access(void)
+{
+	arch_post_user_access();
+}
+
 static inline void local_irq_enable(void)
 {
 	arch_local_irq_enable();
@@ -90,9 +90,9 @@ void security_vm_fixup(uint16_t vm_id)
 	struct acrn_vm_config *vm_config = get_vm_config(vm_id);
 
 	if ((vm_config->guest_flags & GUEST_FLAG_SECURITY_VM) != 0UL) {
-		stac();
+		pre_user_access();
 		tpm2_fixup(vm_id);
-		clac();
+		post_user_access();
 	}
 }
@@ -172,7 +172,7 @@ static int efi_search_smbios_eps(EFI_SYSTEM_TABLE *efi_system_table, struct smbi
 	EFI_GUID smbios2_guid = SMBIOS2_TABLE_GUID;
 
 	/* If both are present, SMBIOS3 takes precedence over SMBIOS */
-	stac();
+	pre_user_access();
 	p = efi_search_guid(efi_system_table, &smbios3_guid);
 	if (p != NULL) {
 		get_smbios3_info((struct smbios3_entry_point *)p, si);
@@ -182,7 +182,7 @@ static int efi_search_smbios_eps(EFI_SYSTEM_TABLE *efi_system_table, struct smbi
 			get_smbios2_info((struct smbios2_entry_point *)p, si);
 		}
 	}
-	clac();
+	post_user_access();
 
 	return (p != NULL);
 }
@@ -239,7 +239,7 @@ static int mem_search_smbios_eps(struct smbios_info *si)
 	 * for the anchor string on paragraph (16-byte) boundaries within the physical address
 	 * 0xf0000-0xfffff.
 	 */
-	stac();
+	pre_user_access();
 	for (p = start; p < end; p += 16) {
 		if (is_smbios3_present(p)) {
 			get_smbios3_info((struct smbios3_entry_point *)p, si);
@@ -249,7 +249,7 @@ static int mem_search_smbios_eps(struct smbios_info *si)
 			break;
 		}
 	}
-	clac();
+	post_user_access();
 
 	return (p < end);
 }
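The header comment added in this commit forbids nesting pairs. A hypothetical caller, sketched below, shows why the rule matters; copy_two_fields() and struct guest_page are invented for illustration and exist nowhere in the tree:

#include <stdint.h>

/* Hypothetical guest-shared layout, for illustration only */
struct guest_page {
	uint64_t a;
	uint64_t b;
};

/* Copy two fields out of guest memory under one access window. */
static void copy_two_fields(const struct guest_page *gp, uint64_t *a, uint64_t *b)
{
	pre_user_access();
	*a = gp->a;
	/*
	 * Do NOT call a helper here that opens its own
	 * pre_user_access()/post_user_access() pair (e.g. a logger writing to
	 * a shared memory buffer): its post_user_access() clears sstatus.SUM
	 * on RISC-V (or re-arms SMAP via clac on x86), so the read below
	 * would fault even though the outer pair is still open.
	 */
	*b = gp->b;
	post_user_access();
}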