hv: ept: refine ept_mr_add based on mmu_add

Refine ept_mr_add so that it is based on mmu_add. Once this is done,
the old map_mem can be removed.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Author: Li, Fei1 <fei1.li@intel.com>, 2018-07-26 13:28:08 +08:00, committed by lijinxia
commit 4bb8456de8, parent da57284ba5
7 changed files with 47 additions and 52 deletions


@@ -258,20 +258,17 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
 	return status;
 }
 
-int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
-	uint64_t gpa_arg, uint64_t size, uint32_t prot_arg)
+int ept_mr_add(const struct vm *vm, uint64_t *pml4_page,
+	uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
 {
-	struct mem_map_params map_params;
 	uint16_t i;
 	struct vcpu *vcpu;
-	uint64_t hpa = hpa_arg;
-	uint64_t gpa = gpa_arg;
-	uint32_t prot = prot_arg;
+	int ret;
+	uint64_t prot = prot_orig;
 
-	/* Setup memory map parameters */
-	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = vm->arch_vm.nworld_eptp;
-	map_params.pml4_inverted = vm->arch_vm.m2p;
 	dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx ",
 		__func__, vm->vm_id, hpa, gpa);
 	dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%016x\n", size, prot);
 
 	/* EPT & VT-d share the same page tables, set SNP bit
 	 * to force snooping of PCIe devices if the page
@@ -280,21 +277,18 @@ int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
 	if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
 		prot |= IA32E_EPT_SNOOP_CTRL;
 	}
 
-	/* TODO: replace map_mem with mmu_add once SOS has add
-	 * HC_VM_WRITE_PROTECT_PAGE support.
-	 */
-	map_mem(&map_params, (void *)hpa,
-		(void *)gpa, size, prot);
+	ret = mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
+	if (ret == 0) {
+		ret = mmu_add((uint64_t *)vm->arch_vm.m2p,
+			gpa, hpa, size, prot, PTT_EPT);
+	}
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
 
-	dev_dbg(ACRN_DBG_EPT, "%s, hpa: 0x%016llx gpa: 0x%016llx ",
-		__func__, hpa, gpa);
-	dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%x\n", size, prot);
-	return 0;
+	return ret;
 }
 
 int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
@@ -323,13 +317,12 @@ int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
 	int ret;
 	uint64_t hpa = gpa2hpa(vm, gpa);
 
 	dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n",
 		__func__, vm->vm_id, gpa, size);
 
 	ret = mmu_modify_or_del(pml4_page, gpa, size,
 		0UL, 0UL, PTT_EPT, MR_DEL);
-	if (ret < 0) {
-		return ret;
-	}
-	if (hpa != 0UL) {
+	if ((ret == 0) && (hpa != 0UL)) {
 		ret = mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p,
 			hpa, size, 0UL, 0UL, PTT_EPT, MR_DEL);
 	}
@@ -338,8 +331,5 @@ int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
 
-	dev_dbg(ACRN_DBG_EPT, "%s, gpa 0x%llx size 0x%llx\n",
-		__func__, gpa, size);
-	return 0;
+	return ret;
 }
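
Reassembled from the added lines in the hunks above, the refactored ept_mr_add reads roughly as follows. The comments are explanatory additions, not from the source, and blank lines and indentation are approximated:

int ept_mr_add(const struct vm *vm, uint64_t *pml4_page,
	uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
{
	uint16_t i;
	struct vcpu *vcpu;
	int ret;
	uint64_t prot = prot_orig;

	dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx ",
		__func__, vm->vm_id, hpa, gpa);
	dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%016x\n", size, prot);

	/* EPT & VT-d share the same page tables: force snooping of
	 * PCIe devices if the page is cachable.
	 */
	if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
		prot |= IA32E_EPT_SNOOP_CTRL;
	}

	/* Install the gpa -> hpa mapping in the EPT passed in by the
	 * caller, then mirror it into the inverted hpa -> gpa (m2p)
	 * table; both now go through the common mmu_add.
	 */
	ret = mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
	if (ret == 0) {
		ret = mmu_add((uint64_t *)vm->arch_vm.m2p,
			gpa, hpa, size, prot, PTT_EPT);
	}

	/* Ask every vCPU to flush stale EPT translations. */
	foreach_vcpu(i, vm, vcpu) {
		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
	}

	return ret;
}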


@@ -564,6 +564,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
 	struct e820_entry *entry;
 	uint64_t hv_hpa;
+	uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;
 
 	rebuild_vm0_e820();
 	dev_dbg(ACRN_DBG_GUEST,
@@ -571,7 +572,8 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 		e820_mem.mem_bottom, e820_mem.mem_top);
 
 	/* create real ept map for all ranges with UC */
-	ept_mr_add(vm, e820_mem.mem_bottom, e820_mem.mem_bottom,
+	ept_mr_add(vm, pml4_page,
+		e820_mem.mem_bottom, e820_mem.mem_bottom,
 		(e820_mem.mem_top - e820_mem.mem_bottom),
 		attr_uc);
 
@@ -579,7 +581,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM) {
-			ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+			ept_mr_modify(vm, pml4_page,
 				entry->baseaddr, entry->length,
 				EPT_WB, EPT_MT_MASK);
 		}
@@ -599,8 +601,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	 * will cause EPT violation if sos accesses hv memory
 	 */
 	hv_hpa = get_hv_image_base();
-	ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-		hv_hpa, CONFIG_RAM_SIZE);
+	ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_RAM_SIZE);
 	return 0;
 }


@@ -2055,11 +2055,15 @@ int vlapic_create(struct vcpu *vcpu)
 		}
 
 		if (is_vcpu_bsp(vcpu)) {
-			ept_mr_add(vcpu->vm,
+			uint64_t *pml4_page =
+				(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
+
+			ept_mr_del(vcpu->vm, pml4_page,
+				DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
+
+			ept_mr_add(vcpu->vm, pml4_page,
 				vlapic_apicv_get_apic_access_addr(vcpu->vm),
 				DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
-				IA32E_EPT_W_BIT | IA32E_EPT_R_BIT |
-				IA32E_EPT_UNCACHED);
+				EPT_WR | EPT_RD | EPT_UNCACHED);
 		}
 	} else {
 		/*No APICv support*/


@@ -344,6 +344,7 @@ static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
 			vaddr = vaddr_next;
 			continue;
 		}
+		return 0;
 	} else {
 		ret = construct_pgentry(ptt, pde);
 		if (ret != 0) {
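
The added return matters because, once an already-present entry covers the remainder of the range, the walk is finished and should report success; without it, control would fall out of the branch and continue into the code that follows the if/else. A stand-alone sketch of that control flow, where entry_present, construct_entry, walk, and ENTRY_SIZE are illustrative stand-ins, not ACRN APIs:

#include <stdint.h>
#include <stdbool.h>

#define ENTRY_SIZE 0x200000UL	/* illustrative: the span one PDE maps (2M) */

/* Toy stand-ins for the real page-table checks in add_pde(). */
static bool entry_present(uint64_t vaddr) { return ((vaddr >> 21U) & 1UL) != 0UL; }
static int construct_entry(uint64_t vaddr) { (void)vaddr; return 0; }

static int walk(uint64_t vaddr, uint64_t vaddr_end)
{
	while (vaddr < vaddr_end) {
		uint64_t vaddr_next =
			(vaddr & ~(ENTRY_SIZE - 1UL)) + ENTRY_SIZE;

		if (entry_present(vaddr)) {
			if (vaddr_next < vaddr_end) {
				vaddr = vaddr_next;	/* more range to cover */
				continue;
			}
			return 0;	/* the added return: fully covered, stop */
		} else {
			int ret = construct_entry(vaddr);
			if (ret != 0) {
				return ret;	/* construction failed */
			}
		}
		/* map the next level under this entry, then advance */
		vaddr = vaddr_next;
	}
	return 0;
}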


@@ -166,6 +166,9 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 {
 	void *pdpt_addr;
 	struct vm *vm0 = get_vm_from_vmid(0U);
+	uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
+	uint64_t gpa = vm->sworld_control.sworld_memory.base_gpa_in_sos;
+	uint64_t size = vm->sworld_control.sworld_memory.length;
 
 	if (vm0 == NULL) {
 		pr_err("Parse vm0 context failed.");
@@ -173,23 +176,18 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 	}
 
 	if (need_clr_mem) {
 		/* clear trusty memory space */
-		(void)memset(HPA2HVA(vm->sworld_control.sworld_memory.base_hpa),
-			0U, vm->sworld_control.sworld_memory.length);
+		(void)memset(HPA2HVA(hpa), 0U, size);
 	}
 
 	/* restore memory to SOS ept mapping */
-	if (ept_mr_add(vm0, vm->sworld_control.sworld_memory.base_hpa,
-		vm->sworld_control.sworld_memory.base_gpa_in_sos,
-		vm->sworld_control.sworld_memory.length,
-		EPT_RWX | EPT_WB) != 0) {
+	if (ept_mr_add(vm0, vm0->arch_vm.nworld_eptp,
+		hpa, gpa, size, EPT_RWX | EPT_WB) != 0) {
 		pr_warn("Restore trusty mem to SOS failed");
 	}
 
 	/* Restore memory to guest normal world */
-	if (ept_mr_add(vm, vm->sworld_control.sworld_memory.base_hpa,
-		vm->sworld_control.sworld_memory.base_gpa_in_uos,
-		vm->sworld_control.sworld_memory.length,
-		EPT_RWX | EPT_WB) != 0) {
+	if (ept_mr_add(vm, vm->arch_vm.nworld_eptp,
+		hpa, gpa, size, EPT_RWX | EPT_WB) != 0) {
 		pr_warn("Restore trusty mem to nworld failed");
 	}


@@ -422,6 +422,7 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 {
 	uint64_t hpa, base_paddr;
 	uint64_t prot;
+	uint64_t *pml4_page;
 
 	if ((region->size & (CPU_PAGE_SIZE - 1UL)) != 0UL) {
 		pr_err("%s: [vm%d] map size 0x%x is not page aligned",
@@ -442,6 +443,7 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 		return -EFAULT;
 	}
 
+	pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
 	if (region->type != MR_DEL) {
 		prot = 0UL;
 		/* access right */
@@ -467,11 +469,10 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 			prot |= EPT_UNCACHED;
 		}
 		/* create gpa to hpa EPT mapping */
-		return ept_mr_add(target_vm, hpa,
+		return ept_mr_add(target_vm, pml4_page, hpa,
 			region->gpa, region->size, prot);
 	} else {
-		return ept_mr_del(target_vm,
-			(uint64_t *)target_vm->arch_vm.nworld_eptp,
+		return ept_mr_del(target_vm, pml4_page,
 			region->gpa, region->size);
 	}


@@ -380,8 +380,8 @@ void destroy_ept(struct vm *vm);
 uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
 uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
-int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
-	uint64_t gpa_arg, uint64_t size, uint32_t prot_arg);
+int ept_mr_add(const struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
+	uint64_t gpa, uint64_t size, uint64_t prot_orig);
 int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
 	uint64_t gpa, uint64_t size,
 	uint64_t prot_set, uint64_t prot_clr);
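
With the new prototype, every caller supplies the EPT root page explicitly instead of having ept_mr_add fetch it from the vm. A minimal usage sketch: map_one_page is a hypothetical helper, while CPU_PAGE_SIZE, EPT_RWX, EPT_WB, and arch_vm.nworld_eptp all appear in the hunks above:

/* Hypothetical caller: map one 4K guest page as RWX, write-back. */
static int map_one_page(struct vm *vm, uint64_t hpa, uint64_t gpa)
{
	uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;

	return ept_mr_add(vm, pml4_page, hpa, gpa,
		CPU_PAGE_SIZE, EPT_RWX | EPT_WB);
}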