diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index 1e1cb93e8..fb49d76c1 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -60,10 +60,10 @@ void destroy_ept(struct vm *vm)
 	if (vm->arch_vm.m2p != NULL)
 		free_ept_mem((uint64_t *)vm->arch_vm.m2p);
 }
-
+/* using return value INVALID_HPA as error code */
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 {
-	uint64_t hpa = 0UL;
+	uint64_t hpa = INVALID_HPA;
 	uint64_t *pgentry, pg_size = 0UL;
 	void *eptp;
 	struct vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
@@ -83,15 +83,19 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 		pr_err("VM %d GPA2HPA: failed for gpa 0x%llx",
 			vm->vm_id, gpa);
 	}
-
-	if (size != NULL) {
+	/**
+	 * If specified parameter size is not NULL and
+	 * the HPA of parameter gpa is found, pg_size shall
+	 * be returned through parameter size.
+	 */
+	if ((size != NULL) && (hpa != INVALID_HPA)) {
 		*size = (uint32_t)pg_size;
 	}
 
 	return hpa;
 }
 
-/* using return value 0 as failure, make sure guest will not use hpa 0 */
+/* using return value INVALID_HPA as error code */
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
 {
 	return local_gpa2hpa(vm, gpa, NULL);
@@ -264,7 +268,9 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 			vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 		}
 	}
-
+/**
+ * @pre [gpa,gpa+size) has been mapped into host physical memory region
+ */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page,
 	uint64_t gpa, uint64_t size)
 {
diff --git a/hypervisor/arch/x86/guest/guest.c b/hypervisor/arch/x86/guest/guest.c
index cce891967..f080bca39 100644
--- a/hypervisor/arch/x86/guest/guest.c
+++ b/hypervisor/arch/x86/guest/guest.c
@@ -336,9 +336,10 @@ static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 	void *g_ptr;
 
 	hpa = local_gpa2hpa(vm, gpa, &pg_size);
-	if (pg_size == 0U) {
-		pr_err("GPA2HPA not found");
-		return 0;
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping",
+			__func__, vm->vm_id, gpa);
+		return 0U;
 	}
 
 	if (fix_pg_size != 0U) {
diff --git a/hypervisor/arch/x86/mmu.c b/hypervisor/arch/x86/mmu.c
index 79a2109e8..49468c09e 100644
--- a/hypervisor/arch/x86/mmu.c
+++ b/hypervisor/arch/x86/mmu.c
@@ -292,7 +292,8 @@ bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
 		curr_hpa = gpa2hpa(vm, gpa);
 		gpa += PAGE_SIZE_4K;
 		next_hpa = gpa2hpa(vm, gpa);
-		if (next_hpa != (curr_hpa + PAGE_SIZE_4K)) {
+		if ((curr_hpa == INVALID_HPA) || (next_hpa == INVALID_HPA)
+			|| (next_hpa != (curr_hpa + PAGE_SIZE_4K))) {
 			return false;
 		}
 		size -= PAGE_SIZE_4K;
diff --git a/hypervisor/arch/x86/pagetable.c b/hypervisor/arch/x86/pagetable.c
index 9efe75ad8..a1fd00420 100644
--- a/hypervisor/arch/x86/pagetable.c
+++ b/hypervisor/arch/x86/pagetable.c
@@ -417,15 +417,14 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 	}
 }
 
+/**
+ * @pre (pml4_page != NULL) && (pg_size != NULL)
+ */
 uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr,
 		uint64_t *pg_size, enum _page_table_type ptt)
 {
 	uint64_t *pml4e, *pdpte, *pde, *pte;
 
-	if ((pml4_page == NULL) || (pg_size == NULL)) {
-		return NULL;
-	}
-
 	pml4e = pml4e_offset(pml4_page, addr);
 	if (pgentry_present(ptt, *pml4e) == 0UL) {
 		return NULL;
diff --git a/hypervisor/arch/x86/trusty.c b/hypervisor/arch/x86/trusty.c
index 5e9f2bd01..af4d93a2d 100644
--- a/hypervisor/arch/x86/trusty.c
+++ b/hypervisor/arch/x86/trusty.c
@@ -63,6 +63,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	uint64_t nworld_pml4e;
 	uint64_t sworld_pml4e;
 	uint64_t gpa;
+	/* Check the HPA of parameter gpa_orig when invoking check_continuous_hpa */
 	uint64_t hpa = gpa2hpa(vm, gpa_orig);
 	uint64_t table_present = EPT_RWX;
 	uint64_t pdpte, *dest_pdpte_p, *src_pdpte_p;
@@ -76,7 +77,10 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 		return;
 	}
 
-	/* Check the physical address should be continuous */
+	/**
+	 * Check the HPA of parameter gpa_orig should exist
+	 * Check the physical address should be continuous
+	 */
 	if (!check_continuous_hpa(vm, gpa_orig, size)) {
 		ASSERT(false, "The physical addr is not continuous for Trusty");
 		return;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 7840c5db1..227bbf1fc 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -371,8 +371,9 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
 		vmid, iobuf.req_buf);
 
 	hpa = gpa2hpa(vm, iobuf.req_buf);
-	if (hpa == 0UL) {
-		pr_err("%s: invalid GPA.\n", __func__);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+			__func__, vm->vm_id, iobuf.req_buf);
 		target_vm->sw.io_shared_page = NULL;
 		return -EINVAL;
 	}
@@ -437,6 +438,11 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 	pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
 	if (region->type != MR_DEL) {
 		hpa = gpa2hpa(vm, region->vm0_gpa);
+		if (hpa == INVALID_HPA) {
+			pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+				__func__, vm->vm_id, region->vm0_gpa);
+			return -EINVAL;
+		}
 		base_paddr = get_hv_image_base();
 		if (((hpa <= base_paddr) &&
 				((hpa + region->size) > base_paddr)) ||
@@ -558,6 +564,11 @@ static int32_t write_protect_page(struct vm *vm, struct wp_data *wp)
 	uint64_t prot_clr;
 
 	hpa = gpa2hpa(vm, wp->gpa);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+			__func__, vm->vm_id, wp->gpa);
+		return -EINVAL;
+	}
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
 		vm->vm_id, wp->gpa, hpa);
 
@@ -666,6 +677,11 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param)
 		return -1;
 	}
 	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
+	if (v_gpa2hpa.hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+			__func__, target_vm->vm_id, v_gpa2hpa.gpa);
+		return -EINVAL;
+	}
 	if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
@@ -985,8 +1001,9 @@ int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param)
 
 	/* the param for this hypercall is page aligned */
 	hpa = gpa2hpa(vm, param);
-	if (hpa == 0UL) {
-		pr_err("%s: invalid GPA.\n", __func__);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+			__func__, vm->vm_id, param);
 		return -EINVAL;
 	}
 
diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index 5fc530191..51057dd11 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -90,6 +90,9 @@ void flush_vpid_single(uint16_t vpid);
 void flush_vpid_global(void);
 void invept(struct vcpu *vcpu);
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg);
+/**
+ * @pre (pml4_page != NULL) && (pg_size != NULL)
+ */
 uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr,
 		uint64_t *pg_size, enum _page_table_type ptt);
 
@@ -125,15 +128,32 @@ static inline void clflush(volatile void *p)
 	asm volatile ("clflush (%0)" :: "r"(p));
 }
 
+/**
+ * Invalid HPA is defined for error checking,
+ * according to SDM vol.3A 4.1.4, the maximum
+ * host physical address width is 52
+ */
+#define INVALID_HPA	(0x1UL << 52U)
 /* External Interfaces */
 void destroy_ept(struct vm *vm);
+/**
+ * @return INVALID_HPA - the HPA of parameter gpa is unmapping
+ * @return hpa - the HPA of parameter gpa is hpa
+ */
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
+/**
+ * @return INVALID_HPA - the HPA of parameter gpa is unmapping
+ * @return hpa - the HPA of parameter gpa is hpa
+ */
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 void ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
 		uint64_t gpa, uint64_t size, uint64_t prot_orig);
 void ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size, uint64_t prot_set, uint64_t prot_clr);
+/**
+ * @pre [gpa,gpa+size) has been mapped into host physical memory region
+ */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);