mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: enable SMAP in hypervisor
With SMAP enabled, the hypervisor cannot access pages owned by a guest (either SOS or UOS). An override is provided: stac()/clac() enable/disable access to guest memory pages.

Precondition: mark hypervisor-owned pages as supervisor mode (U/S = 0), and set all other memory pages as user mode (U/S = 1).

Tracked-On: #2056
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
parent 57dfc7de05
commit 4fc5dcfc3e
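The change this commit repeats at every guest-memory touch point is the guarded-access idiom visible in local_copy_gpa() below: open an access window with stac(), perform the access, and close it with clac() as soon as possible. A minimal self-contained sketch of that idiom, assuming the precondition above (guest pages mapped U/S = 1, hypervisor pages U/S = 0); guarded_read() is a hypothetical name, not part of this diff:

#include <stdint.h>

/* EFLAGS.AC = 1: with CR4.SMAP set, supervisor-mode access to
 * user-mode (U/S = 1) pages is permitted until the matching clac().
 * The "memory" clobber doubles as a compiler barrier so the access
 * cannot be reordered out of the window. */
static inline void stac(void)
{
	asm volatile ("stac" : : : "memory");
}

/* EFLAGS.AC = 0: SMAP enforcement is re-armed. */
static inline void clac(void)
{
	asm volatile ("clac" : : : "memory");
}

/* Hypothetical helper: read one qword from a guest-owned page. */
static uint64_t guarded_read(const uint64_t *guest_buf)
{
	uint64_t val;

	stac();            /* open the access window */
	val = *guest_buf;  /* would fault here if SMAP were still armed */
	clac();            /* close the window right after the access */

	return val;
}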
@@ -424,6 +424,8 @@ void init_cpu_post(uint16_t pcpu_id)
 		enable_smep();
+		enable_smap();
+
 		/* Make sure rdtsc is enabled */
 		check_tsc();
@@ -88,6 +88,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
 	addr = pw_info->top_entry;
 	i = pw_info->level;
+	stac();
 	while (i != 0U) {
 		i--;

@@ -208,6 +209,7 @@ static int32_t local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_wa
 	*gpa = entry | (gva & (page_size - 1UL));
out:
+	clac();
 	if (fault != 0) {
 		ret = -EFAULT;
 		*err_code |= PAGE_FAULT_P_FLAG;

@@ -347,11 +349,13 @@ static inline uint32_t local_copy_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t
 	g_ptr = hpa2hva(hpa);
 
+	stac();
 	if (cp_from_vm) {
 		(void)memcpy_s(h_ptr, len, g_ptr, len);
 	} else {
 		(void)memcpy_s(g_ptr, len, h_ptr, len);
 	}
+	clac();
 
 	return len;
 }
@@ -965,7 +965,6 @@ exception_inject:
 static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	uint64_t src_gva, gpa, val = 0UL;
-	uint64_t *dst_hva, *src_hva;
 	uint64_t rcx, rdi, rsi, rflags;
 	uint32_t err_code;
 	enum cpu_reg_name seg;

@@ -1005,9 +1004,7 @@ static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie
 
 		/* we are sure it will success */
 		(void)gva2gpa(vcpu, src_gva, &gpa, &err_code);
-		src_hva = (uint64_t *)gpa2hva(vcpu->vm, gpa);
-		(void)memcpy_s(&val, opsize, src_hva, opsize);
-
+		(void)copy_from_gpa(vcpu->vm, &val, gpa, opsize);
 		vie_mmio_write(vcpu, val);
 	} else {
 		vie_mmio_read(vcpu, &val);

@@ -1015,8 +1012,7 @@ static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie
 		/* The dest gpa is saved during dst check instruction
 		 * decoding.
 		 */
-		dst_hva = (uint64_t *)gpa2hva(vcpu->vm, vie->dst_gpa);
-		(void)memcpy_s(dst_hva, opsize, &val, opsize);
+		(void)copy_to_gpa(vcpu->vm, &val, vie->dst_gpa, opsize);
 	}
 
 	rsi = vm_get_register(vcpu, CPU_REG_RSI);
@@ -14,6 +14,8 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	struct vhm_request *vhm_req;
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+
+	stac();
 	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
 	if (io_req != NULL) {
 		switch (vcpu->req.type) {

@@ -26,10 +28,12 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 			break;
 
 		default:
 			/*no actions are required for other cases.*/
 			break;
 		}
 	}
 	atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
+	clac();
 }
 
 /**
@@ -339,6 +339,8 @@ void setup_ioapic_irqs(void)
 		uint8_t pin, nr_pins;
 
 		addr = map_ioapic(get_ioapic_base(ioapic_id));
+		hv_access_memory_region_update((uint64_t)addr, PAGE_SIZE);
+
 		nr_pins = ioapic_nr_pins(addr);
 		for (pin = 0U; pin < nr_pins; pin++) {
 			gsi_table[gsi].ioapic_id = ioapic_id;
|
@ -234,6 +234,25 @@ void enable_smep(void)
|
||||
CPU_CR_WRITE(cr4, val64 | CR4_SMEP);
|
||||
}
|
||||
|
||||
void enable_smap(void)
|
||||
{
|
||||
uint64_t val64 = 0UL;
|
||||
|
||||
/* Enable CR4.SMAP*/
|
||||
CPU_CR_READ(cr4, &val64);
|
||||
CPU_CR_WRITE(cr4, val64 | CR4_SMAP);
|
||||
}
|
||||
|
||||
/*
|
||||
* Update memory pages to be owned by hypervisor.
|
||||
*/
|
||||
void hv_access_memory_region_update(uint64_t base, uint64_t size)
|
||||
{
|
||||
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (base & PDE_MASK),
|
||||
((size + PDE_SIZE - 1UL) & PDE_MASK), 0UL, PAGE_USER,
|
||||
&ppt_mem_ops, MR_MODIFY);
|
||||
}
|
||||
|
||||
void init_paging(void)
|
||||
{
|
||||
uint64_t hv_hpa, text_end, size;
|
||||
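enable_smap() above writes CR4.SMAP unconditionally; on CPUs without SMAP support that write would raise #GP. A hedged sketch of the feature probe one could gate it on, using the architectural bit positions from the Intel SDM (CPUID.(EAX=07H,ECX=0):EBX, SMEP = bit 7, SMAP = bit 20); cpu_has_smap() is illustrative and not part of this diff:

#include <stdbool.h>
#include <stdint.h>

#define CPUID_07_EBX_SMEP (1U << 7U)   /* structured extended feature flags */
#define CPUID_07_EBX_SMAP (1U << 20U)

static bool cpu_has_smap(void)
{
	uint32_t eax = 7U, ebx = 0U, ecx = 0U, edx = 0U;

	/* CPUID leaf 07H, subleaf 0: feature bits are returned in EBX */
	asm volatile ("cpuid"
			: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			: "a" (eax), "c" (ecx));

	return (ebx & CPUID_07_EBX_SMAP) != 0U;
}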
@@ -304,6 +323,15 @@ void init_paging(void)
 	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (uint64_t)get_reserve_sworld_memory_base(),
 			TRUSTY_RAM_SIZE * (CONFIG_MAX_VM_NUM - 1U), PAGE_USER, 0UL, &ppt_mem_ops, MR_MODIFY);
 
+#ifdef CONFIG_EFI_STUB
+	/*Hypvervisor need access below memory region on UEFI platform.*/
+	for (i = 0U; i < entries_count; i++) {
+		entry = p_e820 + i;
+		if (entry->type == E820_TYPE_ACPI_RECLAIM) {
+			hv_access_memory_region_update(entry->baseaddr, entry->length);
+		}
+	}
+#endif
 	/* Enable paging */
 	enable_paging();
 
@@ -133,6 +133,7 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 
 	pcpu_id = get_cpu_id();
 
+	stac();
 	/* Save the wakeup vec set by guest. Will return to guest
 	 * with this wakeup vec as entry.
 	 */

@@ -142,9 +143,11 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 	*vm->pm.sx_state_data->wake_vector_32 =
 		(uint32_t) trampoline_start16_paddr;
 
+	clac();
 	/* offline all APs */
 	stop_cpus();
 
+	stac();
 	/* Save default main entry and we will restore it after
 	 * back from S3. So the AP online could jmp to correct
 	 * main entry.

@@ -153,6 +156,7 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 
 	/* Set the main entry for resume from S3 state */
 	write_trampoline_sym(main_entry, (uint64_t)restore_s3_context);
+	clac();
 
 	CPU_IRQ_DISABLE();
 	vmx_off(pcpu_id);

@@ -176,7 +180,9 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 	CPU_IRQ_ENABLE();
 
 	/* restore the default main entry */
+	stac();
 	write_trampoline_sym(main_entry, pmain_entry_saved);
+	clac();
 
 	/* online all APs again */
 	start_cpus();
@@ -103,6 +103,7 @@ uint64_t prepare_trampoline(void)
 	pr_dbg("trampoline code: %llx size %x", dest_pa, size);
 
 	/* Copy segment for AP initialization code below 1MB */
+	stac();
 	(void)memcpy_s(hpa2hva(dest_pa), (size_t)size, &ld_trampoline_load,
 			(size_t)size);
 	update_trampoline_code_refs(dest_pa);

@@ -110,6 +111,7 @@ uint64_t prepare_trampoline(void)
 	for (i = 0UL; i < size; i = i + CACHE_LINE_SIZE) {
 		clflush(hpa2hva(dest_pa + i));
 	}
+	clac();
 
 	trampoline_start16_paddr = dest_pa;
 
@@ -130,7 +130,9 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 	if (vm->arch_vm.sworld_eptp != NULL) {
 		if (need_clr_mem) {
 			/* clear trusty memory space */
+			stac();
 			(void)memset(hpa2hva(hpa), 0U, (size_t)size);
+			clac();
 		}
 
 		ept_mr_del(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);

@@ -364,6 +366,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu,
 
 	mem = (struct trusty_mem *)(hpa2hva(mem_base_hpa));
 
+	stac();
 	/* copy key_info to the first page of trusty memory */
 	(void)memcpy_s(&mem->first_page.key_info, sizeof(g_key_info),
 			&g_key_info, sizeof(g_key_info));

@@ -381,6 +384,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu,
 				vcpu->vm->GUID, sizeof(vcpu->vm->GUID)) == 0) {
 			(void)memset(key_info, 0U, sizeof(struct trusty_key_info));
 			pr_err("%s: derive dvseed failed!", __func__);
+			clac();
 			return false;
 		}
 		key_info->dseed_list[i].cse_svn = g_key_info.dseed_list[i].cse_svn;

@@ -398,6 +402,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu,
 	mem->first_page.startup_param.mem_size = mem_size;
 	mem->first_page.startup_param.tsc_per_ms = CYCLES_PER_MS;
 	mem->first_page.startup_param.trusty_mem_base = TRUSTY_EPT_REBASE_GPA;
+	clac();
 
 	/* According to trusty boot protocol, it will use RDI as the
 	 * address(GPA) of startup_param on boot. Currently, the startup_param
@@ -300,10 +300,12 @@ static void load_pdptrs(const struct acrn_vcpu *vcpu)
 	/* TODO: check whether guest cr3 is valid */
 	uint64_t *guest_cr3_hva = (uint64_t *)gpa2hva(vcpu->vm, guest_cr3);
 
+	stac();
 	exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, get_pgentry(guest_cr3_hva + 0UL));
 	exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, get_pgentry(guest_cr3_hva + 1UL));
 	exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, get_pgentry(guest_cr3_hva + 2UL));
 	exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(guest_cr3_hva + 3UL));
+	clac();
 }
 
 static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
@@ -137,7 +137,9 @@ restore_s3_context:
 	pushq 0xa0 + cpu_ctx(%rip)
 	popfq
 
+	stac
 	call load_gdtr_and_tr
+	clac
 	call restore_msrs
 
 	/*
@@ -29,8 +29,10 @@ int32_t init_vm_boot_info(struct acrn_vm *vm)
 
 	mbi = hpa2hva((uint64_t)boot_regs[1]);
 
+	stac();
 	dev_dbg(ACRN_DBG_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
 	if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0U) {
+		clac();
 		ASSERT(false, "no kernel info found");
 		return -EINVAL;
 	}

@@ -59,6 +61,7 @@ int32_t init_vm_boot_info(struct acrn_vm *vm)
 			strnlen_s(vm->vm_desc->bootargs, MEM_2K);
 
 	vm->sw.linux_info.bootargs_load_addr = (void *)(vm->vm_desc->mem_size - 8*1024UL);
+	clac();
 
 	return 0;
 }

@@ -175,9 +178,11 @@ int32_t init_vm_boot_info(struct acrn_vm *vm)
 
 	mbi = (struct multiboot_info *)hpa2hva((uint64_t)boot_regs[1]);
 
+	stac();
 	dev_dbg(ACRN_DBG_BOOT, "Multiboot detected, flag=0x%x", mbi->mi_flags);
 	if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MODS) == 0U) {
 		ASSERT(false, "no sos kernel info found");
+		clac();
 		return -EINVAL;
 	}
 

@@ -261,6 +266,7 @@ int32_t init_vm_boot_info(struct acrn_vm *vm)
 		/*parse other modules, like firmware /ramdisk */
 		parse_other_modules(vm, mods + 1, mbi->mi_mods_count - 1);
 	}
+	clac();
 	return 0;
 }
 #endif
@@ -1067,6 +1067,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 
 	intr_hdr = (struct acrn_intr_monitor *)hpa2hva(hpa);
 
+	stac();
 	switch (intr_hdr->cmd) {
 	case INTR_CMD_GET_DATA:
 		intr_hdr->buf_cnt = ptirq_get_intr_data(target_vm,

@@ -1085,6 +1086,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	}
 
 	pr_dbg("intr monitor:%d, cnt=%d", intr_hdr->cmd, intr_hdr->buf_cnt);
+	clac();
 
 	return 0;
 }
@@ -25,6 +25,7 @@ static void fire_vhm_interrupt(void)
 	vlapic_intr_edge(vcpu, acrn_vhm_vector);
 }
 
+#if defined(HV_DEBUG)
 static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
 {
 	switch (req->type) {

@@ -54,6 +55,7 @@ static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
 		break;
 	}
 }
+#endif
 
 /**
  * @brief Reset all IO requests status of the VM

@@ -122,8 +124,9 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
 	cur = vcpu->vcpu_id;
-	vhm_req = &req_buf->req_queue[cur];
 
+	stac();
+	vhm_req = &req_buf->req_queue[cur];
 	/* ACRN insert request to VHM and inject upcall */
 	vhm_req->type = io_req->type;
 	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),

@@ -131,6 +134,7 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
 	if (vcpu->vm->sw.is_completion_polling) {
 		vhm_req->completion_polling = 1U;
 	}
+	clac();
 
 	/* pause vcpu, wait for VHM to handle the MMIO request.
 	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we

@@ -145,7 +149,11 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
 	 */
 	set_vhm_req_state(vcpu->vm, vcpu->vcpu_id, REQ_STATE_PENDING);
 
+#if defined(HV_DEBUG)
+	stac();
 	acrn_print_request(vcpu->vcpu_id, vhm_req);
+	clac();
+#endif
 
 	/* signal VHM */
 	fire_vhm_interrupt();

@@ -164,8 +172,10 @@ uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id)
 		return (uint32_t)-1;
 	}
 
+	stac();
 	vhm_req = &req_buf->req_queue[vhm_req_id];
 	state = atomic_load32(&vhm_req->processed);
+	clac();
 
 	return state;
 }

@@ -180,6 +190,8 @@ void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state)
 		return;
 	}
 
+	stac();
 	vhm_req = &req_buf->req_queue[vhm_req_id];
 	atomic_store32(&vhm_req->processed, state);
+	clac();
 }
@@ -13,16 +13,13 @@ static void prepare_bsp_gdt(struct acrn_vm *vm)
 {
 	size_t gdt_len;
 	uint64_t gdt_base_hpa;
-	void *gdt_base_hva;
 
 	gdt_base_hpa = gpa2hpa(vm, boot_context.gdt.base);
 	if (boot_context.gdt.base == gdt_base_hpa) {
 		return;
 	} else {
-		gdt_base_hva = hpa2hva(gdt_base_hpa);
 		gdt_len = ((size_t)boot_context.gdt.limit + 1U) / sizeof(uint8_t);
-
-		(void )memcpy_s(gdt_base_hva, gdt_len, hpa2hva(boot_context.gdt.base), gdt_len);
+		(void)copy_to_gpa(vm, hpa2hva(boot_context.gdt.base), boot_context.gdt.base, gdt_len);
 	}
 
 	return;

@@ -41,6 +38,7 @@ static uint64_t create_zero_page(struct acrn_vm *vm)
 	hva = (struct zero_page *)gpa2hva(vm, gpa);
 	zeropage = hva;
 
+	stac();
 	/* clear the zeropage */
 	(void)memset(zeropage, 0U, MEM_2K);
 

@@ -68,6 +66,7 @@ static uint64_t create_zero_page(struct acrn_vm *vm)
 
 	/* Create/add e820 table entries in zeropage */
 	zeropage->e820_nentries = (uint8_t)create_e820_table(zeropage->entries);
+	clac();
 
 	/* Return Physical Base Address of zeropage */
 	return gpa;

@@ -76,7 +75,6 @@ static uint64_t create_zero_page(struct acrn_vm *vm)
 int32_t general_sw_loader(struct acrn_vm *vm)
 {
 	int32_t ret = 0;
-	void *hva;
 	char dyn_bootargs[100] = {0};
 	uint32_t kernel_entry_offset;
 	struct zero_page *zeropage;

@@ -92,7 +90,9 @@ int32_t general_sw_loader(struct acrn_vm *vm)
 
 	/* calculate the kernel entry point */
 	zeropage = (struct zero_page *)sw_kernel->kernel_src_addr;
+	stac();
 	kernel_entry_offset = (uint32_t)(zeropage->hdr.setup_sects + 1U) * 512U;
+	clac();
 	if (vcpu->arch.cpu_mode == CPU_MODE_64BIT) {
 		/* 64bit entry is the 512bytes after the start */
 		kernel_entry_offset += 512U;

@@ -106,11 +106,9 @@ int32_t general_sw_loader(struct acrn_vm *vm)
 			sw_kernel->kernel_entry_addr);
 	}
 
-	/* Calculate the host-physical address where the guest will be loaded */
-	hva = gpa2hva(vm, (uint64_t)sw_kernel->kernel_load_addr);
-
 	/* Copy the guest kernel image to its run-time location */
-	(void)memcpy_s((void *)hva, sw_kernel->kernel_size, sw_kernel->kernel_src_addr, sw_kernel->kernel_size);
+	(void)copy_to_gpa(vm, sw_kernel->kernel_src_addr,
+		(uint64_t)sw_kernel->kernel_load_addr, sw_kernel->kernel_size);
 
 	/* See if guest is a Linux guest */
 	if (vm->sw.kernel_type == VM_LINUX_GUEST) {

@@ -123,11 +121,10 @@ int32_t general_sw_loader(struct acrn_vm *vm)
 			vcpu_set_gpreg(vcpu, i, 0UL);
 		}
 
-		/* Get host-physical address for guest bootargs */
-		hva = gpa2hva(vm, (uint64_t)linux_info->bootargs_load_addr);
-
 		/* Copy Guest OS bootargs to its load location */
-		(void)strncpy_s((char *)hva, MEM_2K, linux_info->bootargs_src_addr, linux_info->bootargs_size);
+		(void)copy_to_gpa(vm, linux_info->bootargs_src_addr,
+			(uint64_t)linux_info->bootargs_load_addr,
+			(strnlen_s((char *)linux_info->bootargs_src_addr, MEM_2K - 1U) + 1U));
 
 		/* add "hugepagesz=1G hugepages=x" to cmdline for 1G hugepage
 		 * reserving. Current strategy is "total_mem_size in Giga -

@@ -143,19 +140,18 @@ int32_t general_sw_loader(struct acrn_vm *vm)
 #endif
 			if (reserving_1g_pages > 0) {
 				snprintf(dyn_bootargs, 100U, " hugepagesz=1G hugepages=%d", reserving_1g_pages);
-				(void)strncpy_s((char *)hva + linux_info->bootargs_size, 100U, dyn_bootargs, 100U);
+				(void)copy_to_gpa(vm, dyn_bootargs, ((uint64_t)linux_info->bootargs_load_addr
+					+ linux_info->bootargs_size),
+					(strnlen_s(dyn_bootargs, 99U) + 1U));
 			}
 		}
 
 		/* Check if a RAM disk is present with Linux guest */
 		if (linux_info->ramdisk_src_addr != NULL) {
-			/* Get host-physical address for guest RAM disk */
-			hva = gpa2hva(vm, (uint64_t)linux_info->ramdisk_load_addr);
-
 			/* Copy RAM disk to its load location */
-			(void)memcpy_s((void *)hva, linux_info->ramdisk_size,
-				linux_info->ramdisk_src_addr, linux_info->ramdisk_size);
-
+			(void)copy_to_gpa(vm, linux_info->ramdisk_src_addr,
+				(uint64_t)linux_info->ramdisk_load_addr,
+				linux_info->ramdisk_size);
 		}
 
 		/* Create Zeropage and copy Physical Base Address of Zeropage
@@ -28,8 +28,11 @@ uint32_t sbuf_next_ptr(uint32_t pos_arg,
 uint32_t sbuf_get(struct shared_buf *sbuf, uint8_t *data)
 {
 	const void *from;
+	uint32_t ele_size;
 
+	stac();
 	if (sbuf_is_empty(sbuf)) {
+		clac();
 		/* no data available */
 		return 0;
 	}

@@ -40,7 +43,10 @@ uint32_t sbuf_get(struct shared_buf *sbuf, uint8_t *data)
 
 	sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size);
 
-	return sbuf->ele_size;
+	ele_size = sbuf->ele_size;
+	clac();
+
+	return ele_size;
 }
 
 /**

@@ -65,8 +71,10 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 {
 	void *to;
 	uint32_t next_tail;
+	uint32_t ele_size;
 	bool trigger_overwrite = false;
 
+	stac();
 	next_tail = sbuf_next_ptr(sbuf->tail, sbuf->ele_size, sbuf->size);
 	/* if this write would trigger overrun */
 	if (next_tail == sbuf->head) {

@@ -74,6 +82,7 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 		sbuf->overrun_cnt += sbuf->flags & OVERRUN_CNT_EN;
 		if ((sbuf->flags & OVERWRITE_EN) == 0U) {
 			/* if not enable over write, return here. */
+			clac();
 			return 0;
 		}
 		trigger_overwrite = true;

@@ -89,7 +98,10 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 	}
 	sbuf->tail = next_tail;
 
-	return sbuf->ele_size;
+	ele_size = sbuf->ele_size;
+	clac();
+
+	return ele_size;
 }
 
 int32_t sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
@@ -100,6 +100,10 @@ void uart16550_init(void)
 		uart_base_address = pci_pdev_read_cfg(serial_pci_bdf, pci_bar_offset(0), 4U) & PCIM_BAR_MEM_BASE;
 	}
 
+	if (!serial_port_mapped) {
+		hv_access_memory_region_update(uart_base_address, PDE_SIZE);
+	}
+
 	spinlock_init(&uart_rx_lock);
 	spinlock_init(&uart_tx_lock);
 	/* Enable TX and RX FIFOs */
@@ -70,11 +70,13 @@ static int32_t vmsix_remap_entry(struct pci_vdev *vdev, uint32_t index, bool ena
 		 * fields with a single QWORD write, but some hardware can accept 32 bits
 		 * write only
 		 */
+		stac();
 		mmio_write32((uint32_t)(info.pmsi_addr), (void *)&(pentry->addr));
 		mmio_write32((uint32_t)(info.pmsi_addr >> 32U), (void *)((char *)&(pentry->addr) + 4U));
 
 		mmio_write32(info.pmsi_data, (void *)&(pentry->data));
 		mmio_write32(vdev->msix.tables[index].vector_control, (void *)&(pentry->vector_control));
+		clac();
 	}
 
 	return ret;

@@ -278,6 +280,7 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
 			return -EINVAL;
 		}
 
+		stac();
 		/* MSI-X PBA and Capability Table could be in the same range */
 		if (mmio->direction == REQUEST_READ) {
 			/* mmio->size is either 4U or 8U */

@@ -294,6 +297,7 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
 				mmio_write64(mmio->value, (void *)hva);
 			}
 		}
+		clac();
 	}
 
 	return 0;
@@ -531,6 +531,17 @@ write_xcr(int32_t reg, uint64_t val)
 	high = (uint32_t)(val >> 32U);
 	asm volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
 }
+
+static inline void stac(void)
+{
+	asm volatile ("stac" : : : "memory");
+}
+
+static inline void clac(void)
+{
+	asm volatile ("clac" : : : "memory");
+}
+
 #else /* ASSEMBLER defined */
 
 #endif /* ASSEMBLER defined */
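stac/clac map to the STAC/CLAC instructions, which set and clear EFLAGS.AC; the "memory" clobber keeps the compiler from moving guest-memory accesses out of the window. One consequence, visible in the init_vm_boot_info() and sbuf_get()/sbuf_put() hunks above, is that every early-return path inside a window needs its own clac(), or EFLAGS.AC stays set and SMAP protection is silently disabled until some later clac(). A small illustration of that rule, with a hypothetical helper that is not part of this diff (assumes <stdint.h> and an EINVAL definition):

static int32_t read_guest_u32(const uint32_t *guest_ptr, uint32_t *out)
{
	int32_t ret = 0;

	stac();             /* EFLAGS.AC = 1: window open */
	if (*guest_ptr == 0xFFFFFFFFU) {
		clac();     /* the error path must close the window too */
		ret = -EINVAL;
	} else {
		*out = *guest_ptr;
		clac();     /* normal path: window closed */
	}

	return ret;
}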
@@ -106,6 +106,14 @@ void enable_paging(void);
  * @return None
  */
 void enable_smep(void);
 
+/**
+ * @brief Supervisor-mode Access Prevention (SMAP) enable
+ *
+ * @return None
+ */
+void enable_smap(void);
+
 /**
  * @brief MMU page tables initialization
  *

@@ -116,6 +124,8 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,
 		uint64_t size, uint64_t prot, const struct memory_ops *mem_ops);
 void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
 		uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type);
+void hv_access_memory_region_update(uint64_t base, uint64_t size);
+
 /**
  * @brief EPT and VPID capability checking
  *
@@ -296,22 +296,25 @@ int32_t mptable_build(struct acrn_vm *vm)
 	struct mpfps *mpfp;
 	size_t mptable_length, table_length;
 
-	startaddr = (char *)gpa2hva(vm, MPTABLE_BASE);
-
 	table_length = vm->vm_desc->mptable->mpch.base_table_length;
 	mptable_length = sizeof(struct mpfps) + table_length;
-	/* Copy mptable info into guest memory */
-	(void)memcpy_s((void *)startaddr, MPTABLE_MAX_LENGTH,
-			(void *)vm->vm_desc->mptable,
-			mptable_length);
 	if (mptable_length > MPTABLE_MAX_LENGTH) {
 		return -1;
 	}
 
+	/* Copy mptable info into guest memory */
+	copy_to_gpa(vm, (void *)vm->vm_desc->mptable, MPTABLE_BASE, mptable_length);
+
+	startaddr = (char *)gpa2hva(vm, MPTABLE_BASE);
 	curraddr = startaddr;
+	stac();
 	mpfp = (struct mpfps *)curraddr;
 	mpfp->checksum = mpt_compute_checksum(mpfp, sizeof(struct mpfps));
 	curraddr += sizeof(struct mpfps);
 
 	mpch = (struct mpcth *)curraddr;
 	mpch->checksum = mpt_compute_checksum(mpch, mpch->base_table_length);
+	clac();
 
 	return 0U;
 }
|
||||
struct mpfps *mpfp;
|
||||
size_t mptable_length, table_length;
|
||||
|
||||
startaddr = (char *)gpa2hva(vm, MPTABLE_BASE);
|
||||
|
||||
table_length = vm->vm_desc->mptable->mpch.base_table_length;
|
||||
mptable_length = sizeof(struct mpfps) + table_length;
|
||||
/* Copy mptable info into guest memory */
|
||||
(void)memcpy_s((void *)startaddr, MPTABLE_MAX_LENGTH,
|
||||
(void *)vm->vm_desc->mptable,
|
||||
mptable_length);
|
||||
if (mptable_length > MPTABLE_MAX_LENGTH) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Copy mptable info into guest memory */
|
||||
copy_to_gpa(vm, (void *)vm->vm_desc->mptable, MPTABLE_BASE, mptable_length);
|
||||
|
||||
startaddr = (char *)gpa2hva(vm, MPTABLE_BASE);
|
||||
curraddr = startaddr;
|
||||
stac();
|
||||
mpfp = (struct mpfps *)curraddr;
|
||||
mpfp->checksum = mpt_compute_checksum(mpfp, sizeof(struct mpfps));
|
||||
curraddr += sizeof(struct mpfps);
|
||||
|
||||
mpch = (struct mpcth *)curraddr;
|
||||
mpch->checksum = mpt_compute_checksum(mpch, mpch->base_table_length);
|
||||
clac();
|
||||
|
||||
return 0U;
|
||||
}
|
||||
|