hv: ept: refine ept_mr_add based on mmu_add

Refine ept_mr_add based on mmu_add. The old map_mem can be removed
once this is done.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
Li, Fei1 2018-07-26 13:28:08 +08:00 committed by lijinxia
parent da57284ba5
commit 4bb8456de8
7 changed files with 47 additions and 52 deletions

View File

@ -258,20 +258,17 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
return status; return status;
} }
int ept_mr_add(const struct vm *vm, uint64_t hpa_arg, int ept_mr_add(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa_arg, uint64_t size, uint32_t prot_arg) uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
{ {
struct mem_map_params map_params;
uint16_t i; uint16_t i;
struct vcpu *vcpu; struct vcpu *vcpu;
uint64_t hpa = hpa_arg; int ret;
uint64_t gpa = gpa_arg; uint64_t prot = prot_orig;
uint32_t prot = prot_arg;
/* Setup memory map parameters */ dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx ",
map_params.page_table_type = PTT_EPT; __func__, vm->vm_id, hpa, gpa);
map_params.pml4_base = vm->arch_vm.nworld_eptp; dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%016x\n", size, prot);
map_params.pml4_inverted = vm->arch_vm.m2p;
/* EPT & VT-d share the same page tables, set SNP bit /* EPT & VT-d share the same page tables, set SNP bit
* to force snooping of PCIe devices if the page * to force snooping of PCIe devices if the page
@ -280,21 +277,18 @@ int ept_mr_add(const struct vm *vm, uint64_t hpa_arg,
if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) { if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
prot |= IA32E_EPT_SNOOP_CTRL; prot |= IA32E_EPT_SNOOP_CTRL;
} }
/* TODO: replace map_mem with mmu_add once SOS has add
* HC_VM_WRITE_PROTECT_PAGE support. ret = mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
*/ if (ret == 0) {
map_mem(&map_params, (void *)hpa, ret = mmu_add((uint64_t *)vm->arch_vm.m2p,
(void *)gpa, size, prot); gpa, hpa, size, prot, PTT_EPT);
}
foreach_vcpu(i, vm, vcpu) { foreach_vcpu(i, vm, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH); vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
} }
dev_dbg(ACRN_DBG_EPT, "%s, hpa: 0x%016llx gpa: 0x%016llx ", return ret;
__func__, hpa, gpa);
dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%x\n", size, prot);
return 0;
} }
int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page, int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
@ -323,13 +317,12 @@ int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
int ret; int ret;
uint64_t hpa = gpa2hpa(vm, gpa); uint64_t hpa = gpa2hpa(vm, gpa);
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n",
__func__, vm->vm_id, gpa, size);
ret = mmu_modify_or_del(pml4_page, gpa, size, ret = mmu_modify_or_del(pml4_page, gpa, size,
0UL, 0UL, PTT_EPT, MR_DEL); 0UL, 0UL, PTT_EPT, MR_DEL);
if (ret < 0) { if ((ret == 0) && (hpa != 0UL)) {
return ret;
}
if (hpa != 0UL) {
ret = mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p, ret = mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p,
hpa, size, 0UL, 0UL, PTT_EPT, MR_DEL); hpa, size, 0UL, 0UL, PTT_EPT, MR_DEL);
} }
@ -338,8 +331,5 @@ int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH); vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
} }
dev_dbg(ACRN_DBG_EPT, "%s, gpa 0x%llx size 0x%llx\n", return ret;
__func__, gpa, size);
return 0;
} }

View File

@ -564,6 +564,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED); uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
struct e820_entry *entry; struct e820_entry *entry;
uint64_t hv_hpa; uint64_t hv_hpa;
uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;
rebuild_vm0_e820(); rebuild_vm0_e820();
dev_dbg(ACRN_DBG_GUEST, dev_dbg(ACRN_DBG_GUEST,
@ -571,7 +572,8 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
e820_mem.mem_bottom, e820_mem.mem_top); e820_mem.mem_bottom, e820_mem.mem_top);
/* create real ept map for all ranges with UC */ /* create real ept map for all ranges with UC */
ept_mr_add(vm, e820_mem.mem_bottom, e820_mem.mem_bottom, ept_mr_add(vm, pml4_page,
e820_mem.mem_bottom, e820_mem.mem_bottom,
(e820_mem.mem_top - e820_mem.mem_bottom), (e820_mem.mem_top - e820_mem.mem_bottom),
attr_uc); attr_uc);
@ -579,7 +581,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
for (i = 0U; i < e820_entries; i++) { for (i = 0U; i < e820_entries; i++) {
entry = &e820[i]; entry = &e820[i];
if (entry->type == E820_TYPE_RAM) { if (entry->type == E820_TYPE_RAM) {
ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp, ept_mr_modify(vm, pml4_page,
entry->baseaddr, entry->length, entry->baseaddr, entry->length,
EPT_WB, EPT_MT_MASK); EPT_WB, EPT_MT_MASK);
} }
@ -599,8 +601,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
* will cause EPT violation if sos accesses hv memory * will cause EPT violation if sos accesses hv memory
*/ */
hv_hpa = get_hv_image_base(); hv_hpa = get_hv_image_base();
ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_RAM_SIZE);
hv_hpa, CONFIG_RAM_SIZE);
return 0; return 0;
} }

View File

@ -2055,11 +2055,15 @@ int vlapic_create(struct vcpu *vcpu)
} }
if (is_vcpu_bsp(vcpu)) { if (is_vcpu_bsp(vcpu)) {
ept_mr_add(vcpu->vm, uint64_t *pml4_page =
(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
ept_mr_del(vcpu->vm, pml4_page,
DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
ept_mr_add(vcpu->vm, pml4_page,
vlapic_apicv_get_apic_access_addr(vcpu->vm), vlapic_apicv_get_apic_access_addr(vcpu->vm),
DEFAULT_APIC_BASE, CPU_PAGE_SIZE, DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
IA32E_EPT_W_BIT | IA32E_EPT_R_BIT | EPT_WR | EPT_RD | EPT_UNCACHED);
IA32E_EPT_UNCACHED);
} }
} else { } else {
/*No APICv support*/ /*No APICv support*/

View File

@ -344,6 +344,7 @@ static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
vaddr = vaddr_next; vaddr = vaddr_next;
continue; continue;
} }
return 0;
} else { } else {
ret = construct_pgentry(ptt, pde); ret = construct_pgentry(ptt, pde);
if (ret != 0) { if (ret != 0) {

View File

@ -166,6 +166,9 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
{ {
void *pdpt_addr; void *pdpt_addr;
struct vm *vm0 = get_vm_from_vmid(0U); struct vm *vm0 = get_vm_from_vmid(0U);
uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
uint64_t gpa = vm->sworld_control.sworld_memory.base_gpa_in_sos;
uint64_t size = vm->sworld_control.sworld_memory.length;
if (vm0 == NULL) { if (vm0 == NULL) {
pr_err("Parse vm0 context failed."); pr_err("Parse vm0 context failed.");
@ -173,23 +176,18 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
} }
if (need_clr_mem) { if (need_clr_mem) {
/* clear trusty memory space */ /* clear trusty memory space */
(void)memset(HPA2HVA(vm->sworld_control.sworld_memory.base_hpa), (void)memset(HPA2HVA(hpa), 0U, size);
0U, vm->sworld_control.sworld_memory.length);
} }
/* restore memory to SOS ept mapping */ /* restore memory to SOS ept mapping */
if (ept_mr_add(vm0, vm->sworld_control.sworld_memory.base_hpa, if (ept_mr_add(vm0, vm0->arch_vm.nworld_eptp,
vm->sworld_control.sworld_memory.base_gpa_in_sos, hpa, gpa, size, EPT_RWX | EPT_WB) != 0) {
vm->sworld_control.sworld_memory.length,
EPT_RWX | EPT_WB) != 0) {
pr_warn("Restore trusty mem to SOS failed"); pr_warn("Restore trusty mem to SOS failed");
} }
/* Restore memory to guest normal world */ /* Restore memory to guest normal world */
if (ept_mr_add(vm, vm->sworld_control.sworld_memory.base_hpa, if (ept_mr_add(vm, vm->arch_vm.nworld_eptp,
vm->sworld_control.sworld_memory.base_gpa_in_uos, hpa, gpa, size, EPT_RWX | EPT_WB) != 0) {
vm->sworld_control.sworld_memory.length,
EPT_RWX | EPT_WB) != 0) {
pr_warn("Restore trusty mem to nworld failed"); pr_warn("Restore trusty mem to nworld failed");
} }

View File

@ -422,6 +422,7 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
{ {
uint64_t hpa, base_paddr; uint64_t hpa, base_paddr;
uint64_t prot; uint64_t prot;
uint64_t *pml4_page;
if ((region->size & (CPU_PAGE_SIZE - 1UL)) != 0UL) { if ((region->size & (CPU_PAGE_SIZE - 1UL)) != 0UL) {
pr_err("%s: [vm%d] map size 0x%x is not page aligned", pr_err("%s: [vm%d] map size 0x%x is not page aligned",
@ -442,6 +443,7 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
return -EFAULT; return -EFAULT;
} }
pml4_page = (uint64_t *)target_vm->arch_vm.nworld_eptp;
if (region->type != MR_DEL) { if (region->type != MR_DEL) {
prot = 0UL; prot = 0UL;
/* access right */ /* access right */
@ -467,11 +469,10 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
prot |= EPT_UNCACHED; prot |= EPT_UNCACHED;
} }
/* create gpa to hpa EPT mapping */ /* create gpa to hpa EPT mapping */
return ept_mr_add(target_vm, hpa, return ept_mr_add(target_vm, pml4_page, hpa,
region->gpa, region->size, prot); region->gpa, region->size, prot);
} else { } else {
return ept_mr_del(target_vm, return ept_mr_del(target_vm, pml4_page,
(uint64_t *)target_vm->arch_vm.nworld_eptp,
region->gpa, region->size); region->gpa, region->size);
} }

View File

@ -380,8 +380,8 @@ void destroy_ept(struct vm *vm);
uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa); uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size); uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa); uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
int ept_mr_add(const struct vm *vm, uint64_t hpa_arg, int ept_mr_add(const struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
uint64_t gpa_arg, uint64_t size, uint32_t prot_arg); uint64_t gpa, uint64_t size, uint64_t prot_orig);
int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page, int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size, uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr); uint64_t prot_set, uint64_t prot_clr);