HV:MM: fix gpa2hpa-related error checking

In the current hypervisor design, when no HPA is found
for the specified gpa, gpa2hpa and local_gpa2hpa return
0 as an error code, but 0 may be a valid HPA for vm0;
error checking is missing at several gpa2hpa and
local_gpa2hpa call sites; and when lookup_address is
invoked, the caller already guarantees that the
parameter pointers pml4_page and pg_size are not NULL.
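
For illustration only (this snippet is not from the tree), the
old convention made a caller-side check like the following
ambiguous for vm0, whose EPT may legitimately translate a guest
page to host physical address 0:

        /* Old convention: gpa2hpa() returned 0UL on lookup failure. */
        uint64_t hpa = gpa2hpa(vm, gpa);
        if (hpa == 0UL) {
                /* Ambiguous for vm0: unmapped gpa, or a gpa that
                 * really translates to host physical address 0? */
                return -EINVAL;
        }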

If local_gpa2hpa/gpa2hpa returns the invalid HPA, the
function failed to find the HPA for the specified gpa
of the vm. If local_gpa2hpa/gpa2hpa returns a valid
HPA, the function has found the HPA for the specified
gpa of the vm.
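
A minimal sketch of the caller pattern this convention enables
(illustrative only; the real call sites appear in the hunks
below):

        uint64_t hpa = gpa2hpa(vm, gpa);
        if (hpa == INVALID_HPA) {
                /* Lookup failed: the gpa has no EPT mapping. */
                return -EINVAL;
        }
        /* hpa is a real host physical address, possibly 0 for vm0. */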

Each valid vm's EPTP is initialized during vm creation
and stays valid until that vm is destroyed, so the
caller can guarantee that the parameter pointer
pml4_page is not NULL. The caller uses a temporary
variable to store the page size, so the caller can
guarantee that the parameter pointer pg_size is not
NULL.
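
As a sketch of a call site that satisfies both preconditions
(the exact call shape and the PTT_EPT page-table-type argument
are assumptions here, not shown in this diff):

        uint64_t pg_size = 0UL; /* temporary on the stack, so &pg_size != NULL */
        uint64_t *pgentry;

        /* The EPTP is set up at vm creation, so the pointer is
         * non-NULL for any live vm; PTT_EPT is assumed. */
        pgentry = lookup_address((uint64_t *)vm->arch_vm.nworld_eptp,
                        gpa, &pg_size, PTT_EPT);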

In this patch, define an invalid HPA for gpa2hpa and
local_gpa2hpa; add error checking where local_gpa2hpa
and gpa2hpa are invoked; add a precondition to the
lookup_address function and remove the now-redundant
error checking.
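
For reference, the sentinel added in the mmu.h hunk below is
safe because, per SDM Vol.3A 4.1.4, MAXPHYADDR is at most 52,
so valid HPAs occupy bits 0..51 and bit 52 can never be set in
a real translation:

        #define INVALID_HPA (0x1UL << 52U) /* above any 52-bit-wide HPA */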

V1-->V2:
	Define INVALID_HPA as an invalid HPA for gpa2hpa
	and local_gpa2hpa;
	Update related error checking when invoking
	gpa2hpa or local_gpa2hpa.
V2-->V3:
	Add debug information when the specified gpa2hpa
	mapping doesn't exist and ept_mr_del is called;
	Update the INVALID_HPA definition to be easier
	to review.
V3-->V4:
	Add vm->id and gpa to the pr_err message;
	Add a precondition to ept_mr_del to cover the
	[gpa,gpa+size) unmapping case.
V4-->V5:
	Update comments;
	Update the pr_err message.

Tracked-On: #1258

Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>

@@ -60,10 +60,10 @@ void destroy_ept(struct vm *vm)
 	if (vm->arch_vm.m2p != NULL)
 		free_ept_mem((uint64_t *)vm->arch_vm.m2p);
 }
+/* using return value INVALID_HPA as error code */
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 {
-	uint64_t hpa = 0UL;
+	uint64_t hpa = INVALID_HPA;
 	uint64_t *pgentry, pg_size = 0UL;
 	void *eptp;
 	struct vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
@@ -83,15 +83,19 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 		pr_err("VM %d GPA2HPA: failed for gpa 0x%llx",
 				vm->vm_id, gpa);
 	}
-	if (size != NULL) {
+	/**
+	 * If the specified parameter size is not NULL and
+	 * the HPA of parameter gpa is found, pg_size shall
+	 * be returned through parameter size.
+	 */
+	if ((size != NULL) && (hpa != INVALID_HPA)) {
 		*size = (uint32_t)pg_size;
 	}
 	return hpa;
 }
-/* using return value 0 as failure, make sure guest will not use hpa 0 */
+/* using return value INVALID_HPA as error code */
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
 {
 	return local_gpa2hpa(vm, gpa, NULL);
@@ -264,7 +268,9 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 			vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 		}
 	}
 }
+/**
+ * @pre [gpa,gpa+size) has been mapped into host physical memory region
+ */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page,
 		uint64_t gpa, uint64_t size)
 {

@@ -336,9 +336,10 @@ static inline uint32_t local_copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 	void *g_ptr;
 	hpa = local_gpa2hpa(vm, gpa, &pg_size);
-	if (pg_size == 0U) {
-		pr_err("GPA2HPA not found");
-		return 0;
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped",
+			__func__, vm->vm_id, gpa);
+		return 0U;
 	}
 	if (fix_pg_size != 0U) {

@@ -292,7 +292,8 @@ bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
 		curr_hpa = gpa2hpa(vm, gpa);
 		gpa += PAGE_SIZE_4K;
 		next_hpa = gpa2hpa(vm, gpa);
-		if (next_hpa != (curr_hpa + PAGE_SIZE_4K)) {
+		if ((curr_hpa == INVALID_HPA) || (next_hpa == INVALID_HPA)
+				|| (next_hpa != (curr_hpa + PAGE_SIZE_4K))) {
 			return false;
 		}
 		size -= PAGE_SIZE_4K;

@@ -417,15 +417,14 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 		}
 	}
 }
+/**
+ * @pre (pml4_page != NULL) && (pg_size != NULL)
+ */
 uint64_t *lookup_address(uint64_t *pml4_page,
 		uint64_t addr, uint64_t *pg_size, enum _page_table_type ptt)
 {
 	uint64_t *pml4e, *pdpte, *pde, *pte;
-	if ((pml4_page == NULL) || (pg_size == NULL)) {
-		return NULL;
-	}
 	pml4e = pml4e_offset(pml4_page, addr);
 	if (pgentry_present(ptt, *pml4e) == 0UL) {
 		return NULL;

@@ -63,6 +63,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	uint64_t nworld_pml4e;
 	uint64_t sworld_pml4e;
 	uint64_t gpa;
+	/* the HPA of parameter gpa_orig is checked below via check_continuous_hpa */
 	uint64_t hpa = gpa2hpa(vm, gpa_orig);
 	uint64_t table_present = EPT_RWX;
 	uint64_t pdpte, *dest_pdpte_p, *src_pdpte_p;
@@ -76,7 +77,10 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 		return;
 	}
-	/* Check the physical address should be continuous */
+	/**
+	 * Check that the HPA of parameter gpa_orig exists and that
+	 * the physical address range is continuous
+	 */
 	if (!check_continuous_hpa(vm, gpa_orig, size)) {
 		ASSERT(false, "The physical addr is not continuous for Trusty");
 		return;

@@ -371,8 +371,9 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
 			vmid, iobuf.req_buf);
 	hpa = gpa2hpa(vm, iobuf.req_buf);
-	if (hpa == 0UL) {
-		pr_err("%s: invalid GPA.\n", __func__);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped.",
+			__func__, vm->vm_id, iobuf.req_buf);
 		target_vm->sw.io_shared_page = NULL;
 		return -EINVAL;
 	}
@@ -437,6 +438,11 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 	pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
 	if (region->type != MR_DEL) {
 		hpa = gpa2hpa(vm, region->vm0_gpa);
+		if (hpa == INVALID_HPA) {
+			pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped.",
+				__func__, vm->vm_id, region->vm0_gpa);
+			return -EINVAL;
+		}
 		base_paddr = get_hv_image_base();
 		if (((hpa <= base_paddr) &&
 				((hpa + region->size) > base_paddr)) ||
@@ -558,6 +564,11 @@ static int32_t write_protect_page(struct vm *vm, struct wp_data *wp)
 	uint64_t prot_clr;
 	hpa = gpa2hpa(vm, wp->gpa);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped.",
+			__func__, vm->vm_id, wp->gpa);
+		return -EINVAL;
+	}
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
 			vm->vm_id, wp->gpa, hpa);
@@ -666,6 +677,11 @@ int32_t hcall_gpa_to_hpa(struct vm *vm, uint16_t vmid, uint64_t param)
 		return -1;
 	}
 	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
+	if (v_gpa2hpa.hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped.",
+			__func__, target_vm->vm_id, v_gpa2hpa.gpa);
+		return -EINVAL;
+	}
 	if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
@@ -985,8 +1001,9 @@ int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param)
 	/* the param for this hypercall is page aligned */
 	hpa = gpa2hpa(vm, param);
-	if (hpa == 0UL) {
-		pr_err("%s: invalid GPA.\n", __func__);
+	if (hpa == INVALID_HPA) {
+		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapped.",
+			__func__, vm->vm_id, param);
 		return -EINVAL;
 	}

@@ -90,6 +90,9 @@ void flush_vpid_single(uint16_t vpid);
 void flush_vpid_global(void);
 void invept(struct vcpu *vcpu);
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg);
+/**
+ * @pre (pml4_page != NULL) && (pg_size != NULL)
+ */
 uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr,
 		uint64_t *pg_size, enum _page_table_type ptt);
@@ -125,15 +128,32 @@ static inline void clflush(volatile void *p)
 	asm volatile ("clflush (%0)" :: "r"(p));
 }
+/**
+ * An invalid HPA is defined for error checking; per SDM
+ * Vol.3A 4.1.4, the maximum host physical address width
+ * is 52 bits.
+ */
+#define INVALID_HPA	(0x1UL << 52U)
 /* External Interfaces */
 void destroy_ept(struct vm *vm);
+/**
+ * @return INVALID_HPA - the specified gpa is not mapped
+ * @return hpa - the HPA that the specified gpa maps to
+ */
uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
+/**
+ * @return INVALID_HPA - the specified gpa is not mapped
+ * @return hpa - the HPA that the specified gpa maps to
+ */
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 void ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
 		uint64_t gpa, uint64_t size, uint64_t prot_orig);
 void ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size, uint64_t prot_set, uint64_t prot_clr);
+/**
+ * @pre [gpa,gpa+size) has been mapped into host physical memory region
+ */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);