hv: ept: store virtual address of EPT PML4 table

Most of the time we use the virtual address of the EPT PML4 table, not
the physical address. Store the host-virtual address (HVA) in struct
vm_arch and convert it with HVA2HPA() only at the points where the
hardware requires a physical address.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author:    Li, Fei1 <fei1.li@intel.com>
Date:      2018-07-17 16:23:33 +08:00
Committer: lijinxia
commit 1815a1bb01, parent 23a5c74ac7
8 changed files with 58 additions and 57 deletions
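Taken together, the hunks below change a single convention. A minimal
before/after sketch (the field names, macros, and calls are the ones
appearing in this diff; the juxtaposition is illustrative):

    /* Before: the eptp fields held a host-physical address (uint64_t),
     * so every software walk had to convert back to a pointer first: */
    free_ept_mem(HPA2HVA(vm->arch_vm.nworld_eptp));

    /* After: the fields hold the host-virtual address (void *); software
     * dereferences them directly and converts only at hardware boundaries,
     * i.e. the VMCS EPT pointer, INVEPT descriptors, and the IOMMU root: */
    free_ept_mem(vm->arch_vm.nworld_eptp);
    exec_vmwrite64(VMX_EPT_POINTER_FULL,
            HVA2HPA(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL);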


@@ -98,10 +98,10 @@ static void free_ept_mem(void *pml4_addr)
 void destroy_ept(struct vm *vm)
 {
-	if (vm->arch_vm.nworld_eptp != 0U)
-		free_ept_mem(HPA2HVA(vm->arch_vm.nworld_eptp));
-	if (vm->arch_vm.m2p != 0U)
-		free_ept_mem(HPA2HVA(vm->arch_vm.m2p));
+	if (vm->arch_vm.nworld_eptp != NULL)
+		free_ept_mem(vm->arch_vm.nworld_eptp);
+	if (vm->arch_vm.m2p != NULL)
+		free_ept_mem(vm->arch_vm.m2p);

 	/*
 	 * If secure world is initialized, destroy Secure world ept.
@@ -110,9 +110,10 @@ void destroy_ept(struct vm *vm)
 	 * - trusty is enabled. But not initialized yet.
 	 *   Check vm->arch_vm.sworld_eptp.
 	 */
-	if (vm->sworld_control.sworld_enabled && (vm->arch_vm.sworld_eptp != 0U)) {
-		free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
-		vm->arch_vm.sworld_eptp = 0UL;
+	if (vm->sworld_control.sworld_enabled &&
+			(vm->arch_vm.sworld_eptp != NULL)) {
+		free_ept_mem(vm->arch_vm.sworld_eptp);
+		vm->arch_vm.sworld_eptp = NULL;
 	}
 }
@@ -124,8 +125,8 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 	struct map_params map_params;

 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+	map_params.pml4_base = vm->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm->arch_vm.m2p;
 	obtain_last_page_table_entry(&map_params, &entry, (void *)gpa, true);
 	if (entry.entry_present == PT_PRESENT) {
 		hpa = ((entry.entry_val & (~(entry.page_size - 1)))
@@ -156,8 +157,8 @@ uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)
 	struct map_params map_params;

 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+	map_params.pml4_base = vm->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm->arch_vm.m2p;
 	obtain_last_page_table_entry(&map_params, &entry,
 			(void *)hpa, false);
@@ -494,15 +495,12 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
 	/* Setup memory map parameters */
 	map_params.page_table_type = PTT_EPT;
-	if (vm->arch_vm.nworld_eptp != 0U) {
-		map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-		map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
-	} else {
-		map_params.pml4_base = alloc_paging_struct();
-		vm->arch_vm.nworld_eptp = HVA2HPA(map_params.pml4_base);
-		map_params.pml4_inverted = alloc_paging_struct();
-		vm->arch_vm.m2p = HVA2HPA(map_params.pml4_inverted);
+	if (vm->arch_vm.nworld_eptp == NULL) {
+		vm->arch_vm.nworld_eptp = alloc_paging_struct();
+		vm->arch_vm.m2p = alloc_paging_struct();
 	}
+	map_params.pml4_base = vm->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm->arch_vm.m2p;

 	if (type == MAP_MEM || type == MAP_MMIO) {
 		/* EPT & VT-d share the same page tables, set SNP bit
@@ -540,7 +538,7 @@ int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
 	uint16_t i;
 	int ret;

-	ret = mmu_modify((uint64_t *)HPA2HVA(vm->arch_vm.nworld_eptp),
+	ret = mmu_modify((uint64_t *)vm->arch_vm.nworld_eptp,
 			gpa, size, attr_set, attr_clr, PTT_EPT);

 	foreach_vcpu(i, vm, vcpu) {
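Note that the conversions removed above were never expensive: the ACRN
hypervisor of this era identity-maps its own memory, and the two macros
are commonly thin casts along the following lines (an assumption about
the mmu header, not verified; only the identity-map intent is implied by
the diff). The change is about type discipline, not cost:

    /* Assumed definitions under the hypervisor's 1:1 mapping; the real
     * ones live in the mmu header and may differ in detail. */
    #define HPA2HVA(x) ((void *)(x))
    #define HVA2HPA(x) ((uint64_t)(x))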


@@ -178,11 +178,12 @@ void invept(struct vcpu *vcpu)
 	struct invept_desc desc = {0};

 	if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
-		desc.eptp = vcpu->vm->arch_vm.nworld_eptp | (3UL << 3U) | 6UL;
+		desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
+				(3UL << 3U) | 6UL;
 		_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 		if (vcpu->vm->sworld_control.sworld_enabled &&
-				vcpu->vm->arch_vm.sworld_eptp) {
-			desc.eptp = vcpu->vm->arch_vm.sworld_eptp
+				vcpu->vm->arch_vm.sworld_eptp != NULL) {
+			desc.eptp = HVA2HPA(vcpu->vm->arch_vm.sworld_eptp)
 					| (3UL << 3U) | 6UL;
 			_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 		}
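The "(3UL << 3U) | 6UL" pattern that now wraps HVA2HPA() encodes the low
bits of an EPT pointer as defined by the Intel SDM; a sketch with named
constants (the constant names are hypothetical, the bit meanings are
architectural):

    /* EPTP bits 2:0 = EPT paging-structure memory type (6 = write-back);
     * bits 5:3 = EPT page-walk length minus one (3 = 4-level walk). */
    #define EPT_MT_WB   6UL          /* hypothetical name */
    #define EPT_PWL_4   (3UL << 3U)  /* hypothetical name */

    desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) | EPT_PWL_4 | EPT_MT_WB;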


@@ -96,7 +96,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	}

 	if (!vm->sworld_control.sworld_enabled
-			|| vm->arch_vm.sworld_eptp != 0UL) {
+			|| vm->arch_vm.sworld_eptp != NULL) {
 		pr_err("Sworld is not enabled or Sworld eptp is not NULL");
 		return;
 	}
@@ -108,10 +108,10 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	}

 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+	map_params.pml4_inverted = vm->arch_vm.m2p;

 	/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
+	map_params.pml4_base = vm->arch_vm.nworld_eptp;
 	unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);

 	/* Copy PDPT entries from Normal world to Secure world
@@ -122,7 +122,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	 * and Normal World's EPT
 	 */
 	pml4_base = alloc_paging_struct();
-	vm->arch_vm.sworld_eptp = HVA2HPA(pml4_base);
+	vm->arch_vm.sworld_eptp = pml4_base;

 	/* The trusty memory is remapped to guest physical address
 	 * of gpa_rebased to gpa_rebased + size
@@ -132,7 +132,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	mem_write64(pml4_base, sworld_pml4e);

-	nworld_pml4e = mem_read64(HPA2HVA(vm->arch_vm.nworld_eptp));
+	nworld_pml4e = mem_read64(vm->arch_vm.nworld_eptp);

 	/*
 	 * copy PTPDEs from normal world EPT to secure world EPT,
@@ -162,8 +162,8 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 				IA32E_EPT_WB));

 	/* Unmap trusty memory space from sos ept mapping*/
-	map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+	map_params.pml4_base = vm0->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm0->arch_vm.m2p;
 	/* Get the gpa address in SOS */
 	gpa = hpa2gpa(vm0, hpa);
 	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
@@ -199,8 +199,8 @@ void destroy_secure_world(struct vm *vm)
 	/* restore memory to SOS ept mapping */
 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+	map_params.pml4_base = vm0->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm0->arch_vm.m2p;

 	map_mem(&map_params, (void *)vm->sworld_control.sworld_memory.base_hpa,
 		(void *)vm->sworld_control.sworld_memory.base_gpa,
@@ -333,10 +333,12 @@ void switch_world(struct vcpu *vcpu, int next_world)
 	/* load EPTP for next world */
 	if (next_world == NORMAL_WORLD) {
 		exec_vmwrite64(VMX_EPT_POINTER_FULL,
-			vcpu->vm->arch_vm.nworld_eptp | (3UL<<3) | 6UL);
+			HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
+			(3UL<<3) | 6UL);
 	} else {
 		exec_vmwrite64(VMX_EPT_POINTER_FULL,
-			vcpu->vm->arch_vm.sworld_eptp | (3UL<<3) | 6UL);
+			HVA2HPA(vcpu->vm->arch_vm.sworld_eptp) |
+			(3UL<<3) | 6UL);
 	}

 	/* Update world index */
@@ -472,7 +474,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
 	trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;

 	exec_vmwrite64(VMX_EPT_POINTER_FULL,
-		vm->arch_vm.sworld_eptp | (3UL<<3) | 6UL);
+		HVA2HPA(vm->arch_vm.sworld_eptp) | (3UL<<3) | 6UL);

 	/* save Normal World context */
 	save_world_ctx(&vcpu->arch_vcpu.contexts[NORMAL_WORLD]);
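The trusty hunks above all serve one share-then-carve-out pattern,
condensed here as a sketch (names taken from the diff, control flow
abbreviated):

    /* 1. Secure world gets its own PML4 page, now stored as an HVA. */
    pml4_base = alloc_paging_struct();
    vm->arch_vm.sworld_eptp = pml4_base;

    /* 2. Its PML4E points at the normal world's PDPT, so secure world
     *    can see normal-world memory... */
    nworld_pml4e = mem_read64(vm->arch_vm.nworld_eptp);

    /* 3. ...while the trusty range is unmapped from the normal-world
     *    EPT, so the reverse access is impossible. */
    map_params.pml4_base = vm->arch_vm.nworld_eptp;
    unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);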


@@ -16,8 +16,8 @@ uint64_t gpa2hpa_for_trusty(struct vm *vm, uint64_t gpa)
 	struct map_params map_params;

 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+	map_params.pml4_base = vm->arch_vm.sworld_eptp;
+	map_params.pml4_inverted = vm->arch_vm.m2p;
 	obtain_last_page_table_entry(&map_params, &entry, (void *)gpa, true);
 	if (entry.entry_present == PT_PRESENT) {
 		hpa = ((entry.entry_val & (~(entry.page_size - 1UL)))
@@ -75,7 +75,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	}

 	if (!vm->sworld_control.sworld_enabled
-			|| vm->arch_vm.sworld_eptp != 0UL) {
+			|| vm->arch_vm.sworld_eptp != NULL) {
 		pr_err("Sworld is not enabled or Sworld eptp is not NULL");
 		return;
 	}
@@ -97,7 +97,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	 * and Normal World's EPT
 	 */
 	pml4_base = alloc_paging_struct();
-	vm->arch_vm.sworld_eptp = HVA2HPA(pml4_base);
+	vm->arch_vm.sworld_eptp = pml4_base;

 	/* The trusty memory is remapped to guest physical address
 	 * of gpa_rebased to gpa_rebased + size
@@ -108,7 +108,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 			IA32E_EPT_X_BIT;
 	mem_write64(pml4_base, sworld_pml4e);
-	nworld_pml4e = mem_read64(HPA2HVA(vm->arch_vm.nworld_eptp));
+	nworld_pml4e = mem_read64(vm->arch_vm.nworld_eptp);
 	(void)memcpy_s(HPA2HVA(sworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE,
 			HPA2HVA(nworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE);
@@ -116,8 +116,8 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	map_params.page_table_type = PTT_EPT;
 	while (size > 0) {
-		map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-		map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+		map_params.pml4_base = vm->arch_vm.nworld_eptp;
+		map_params.pml4_inverted = vm->arch_vm.m2p;
 		obtain_last_page_table_entry(&map_params, &entry,
 				(void *)gpa_uos, true);
 		mod = (gpa_uos % entry.page_size);
@@ -131,7 +131,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 				(void *)gpa_uos, adjust_size, 0U);
 		/* Map to secure world */
-		map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
+		map_params.pml4_base = vm->arch_vm.sworld_eptp;
 		map_mem(&map_params, (void *)hpa,
 				(void *)gpa_rebased, adjust_size,
 				(IA32E_EPT_R_BIT |
@@ -140,8 +140,8 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 				IA32E_EPT_WB));
 		/* Unmap trusty memory space from sos ept mapping*/
-		map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-		map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+		map_params.pml4_base = vm0->arch_vm.nworld_eptp;
+		map_params.pml4_inverted = vm0->arch_vm.m2p;
 		/* Get the gpa address in SOS */
 		gpa_sos = hpa2gpa(vm0, hpa);
@@ -183,8 +183,8 @@ void destroy_secure_world(struct vm *vm)
 	map_params.page_table_type = PTT_EPT;
 	while (size > 0) {
 		/* clear trusty memory space */
-		map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
-		map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+		map_params.pml4_base = vm->arch_vm.sworld_eptp;
+		map_params.pml4_inverted = vm->arch_vm.m2p;
 		obtain_last_page_table_entry(&map_params, &entry,
 				(void *)gpa, true);
 		hpa = gpa2hpa_for_trusty(vm, gpa);
@@ -195,8 +195,8 @@ void destroy_secure_world(struct vm *vm)
 		(void)memset(HPA2HVA(hpa), 0, adjust_size);
 		/* restore memory to SOS ept mapping */
-		map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-		map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+		map_params.pml4_base = vm0->arch_vm.nworld_eptp;
+		map_params.pml4_inverted = vm0->arch_vm.m2p;
 		/* here gpa=hpa because sos 1:1 mapping
 		 * this is a temp solution
 		 */


@@ -1344,7 +1344,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	 * TODO: introduce API to make this data driven based
 	 * on VMX_EPT_VPID_CAP
 	 */
-	value64 = vm->arch_vm.nworld_eptp | (3UL << 3U) | 6UL;
+	value64 = HVA2HPA(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
 	exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
 	pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);


@@ -616,14 +616,14 @@ int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
 	/* create a iommu domain for target VM if not created */
 	if (target_vm->iommu_domain == NULL) {
-		if (target_vm->arch_vm.nworld_eptp == 0UL) {
+		if (target_vm->arch_vm.nworld_eptp == NULL) {
 			pr_err("%s, EPT of VM not set!\n",
 				__func__, target_vm->attr.id);
 			return -EPERM;
 		}
 		/* TODO: how to get vm's address width? */
 		target_vm->iommu_domain = create_iommu_domain(vmid,
-			target_vm->arch_vm.nworld_eptp, 48U);
+			HVA2HPA(target_vm->arch_vm.nworld_eptp), 48U);
 		if (target_vm->iommu_domain == NULL) {
 			return -ENODEV;
 		}


@@ -25,7 +25,7 @@ int64_t hcall_world_switch(struct vcpu *vcpu)
 		return -EPERM;
 	}

-	if (vcpu->vm->arch_vm.sworld_eptp == 0U) {
+	if (vcpu->vm->arch_vm.sworld_eptp == NULL) {
 		pr_err("%s, Trusty is not initialized!\n", __func__);
 		return -EPERM;
 	}
@@ -44,7 +44,7 @@ int64_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
 		return -EPERM;
 	}

-	if (vcpu->vm->arch_vm.sworld_eptp != 0U) {
+	if (vcpu->vm->arch_vm.sworld_eptp != NULL) {
 		pr_err("%s, Trusty already initialized!\n", __func__);
 		return -EPERM;
 	}


@@ -87,13 +87,13 @@ enum vm_state {
 struct vm_arch {
 	uint64_t guest_init_pml4;	/* Guest init pml4 */
 	/* EPT hierarchy for Normal World */
-	uint64_t nworld_eptp;
+	void *nworld_eptp;
 	/* EPT hierarchy for Secure World
 	 * Secure world can access Normal World's memory,
 	 * but Normal World can not access Secure World's memory.
 	 */
-	uint64_t sworld_eptp;
-	uint64_t m2p;	/* machine address to guest physical address */
+	void *sworld_eptp;
+	void *m2p;	/* machine address to guest physical address */
 	void *tmp_pg_array;	/* Page array for tmp guest paging struct */
 	void *iobitmap[2];	/* IO bitmap page array base address for this VM */
 	void *msr_bitmap;	/* MSR bitmap page base address for this VM */
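One subtlety of the uint64_t-to-void-* switch: the "!= NULL" guards in
destroy_ept() and the hypercalls assume the fields start out null. A
zero initializer yields null pointers in C, so the old "!= 0U" and the
new "!= NULL" tests are equivalent; a self-contained sketch (the struct
is a hypothetical stand-in, not the real vm_arch):

    #include <assert.h>
    #include <stddef.h>

    struct vm_arch_sketch {      /* hypothetical stand-in for vm_arch */
            void *nworld_eptp;
            void *sworld_eptp;
            void *m2p;
    };

    int main(void)
    {
            /* VM creation zeroes the arch state; zero-initialized
             * pointer members compare equal to NULL, which is what
             * the "!= NULL" guards rely on. */
            struct vm_arch_sketch arch = {0};
            assert(arch.nworld_eptp == NULL && arch.sworld_eptp == NULL);
            return 0;
    }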