mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-26 15:31:35 +00:00
hv: ept: store virtual address of EPT PML4 table
Most of the time, we use the virtual address of the EPT PML4 table, not the physical address. Signed-off-by: Li, Fei1 <fei1.li@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
23a5c74ac7
commit
1815a1bb01
@ -98,10 +98,10 @@ static void free_ept_mem(void *pml4_addr)
|
||||
|
||||
void destroy_ept(struct vm *vm)
|
||||
{
|
||||
if (vm->arch_vm.nworld_eptp != 0U)
|
||||
free_ept_mem(HPA2HVA(vm->arch_vm.nworld_eptp));
|
||||
if (vm->arch_vm.m2p != 0U)
|
||||
free_ept_mem(HPA2HVA(vm->arch_vm.m2p));
|
||||
if (vm->arch_vm.nworld_eptp != NULL)
|
||||
free_ept_mem(vm->arch_vm.nworld_eptp);
|
||||
if (vm->arch_vm.m2p != NULL)
|
||||
free_ept_mem(vm->arch_vm.m2p);
|
||||
|
||||
/*
|
||||
* If secure world is initialized, destroy Secure world ept.
|
||||
@ -110,9 +110,10 @@ void destroy_ept(struct vm *vm)
|
||||
* - trusty is enabled. But not initialized yet.
|
||||
* Check vm->arch_vm.sworld_eptp.
|
||||
*/
|
||||
if (vm->sworld_control.sworld_enabled && (vm->arch_vm.sworld_eptp != 0U)) {
|
||||
free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
|
||||
vm->arch_vm.sworld_eptp = 0UL;
|
||||
if (vm->sworld_control.sworld_enabled &&
|
||||
(vm->arch_vm.sworld_eptp != NULL)) {
|
||||
free_ept_mem(vm->arch_vm.sworld_eptp);
|
||||
vm->arch_vm.sworld_eptp = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,8 +125,8 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
|
||||
struct map_params map_params;
|
||||
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_base = vm->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
obtain_last_page_table_entry(&map_params, &entry, (void *)gpa, true);
|
||||
if (entry.entry_present == PT_PRESENT) {
|
||||
hpa = ((entry.entry_val & (~(entry.page_size - 1)))
|
||||
@ -156,8 +157,8 @@ uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)
|
||||
struct map_params map_params;
|
||||
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_base = vm->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
|
||||
obtain_last_page_table_entry(&map_params, &entry,
|
||||
(void *)hpa, false);
|
||||
@ -494,15 +495,12 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
|
||||
|
||||
/* Setup memory map parameters */
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
if (vm->arch_vm.nworld_eptp != 0U) {
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
} else {
|
||||
map_params.pml4_base = alloc_paging_struct();
|
||||
vm->arch_vm.nworld_eptp = HVA2HPA(map_params.pml4_base);
|
||||
map_params.pml4_inverted = alloc_paging_struct();
|
||||
vm->arch_vm.m2p = HVA2HPA(map_params.pml4_inverted);
|
||||
if (vm->arch_vm.nworld_eptp == NULL) {
|
||||
vm->arch_vm.nworld_eptp = alloc_paging_struct();
|
||||
vm->arch_vm.m2p = alloc_paging_struct();
|
||||
}
|
||||
map_params.pml4_base = vm->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
|
||||
if (type == MAP_MEM || type == MAP_MMIO) {
|
||||
/* EPT & VT-d share the same page tables, set SNP bit
|
||||
@ -540,7 +538,7 @@ int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
|
||||
uint16_t i;
|
||||
int ret;
|
||||
|
||||
ret = mmu_modify((uint64_t *)HPA2HVA(vm->arch_vm.nworld_eptp),
|
||||
ret = mmu_modify((uint64_t *)vm->arch_vm.nworld_eptp,
|
||||
gpa, size, attr_set, attr_clr, PTT_EPT);
|
||||
|
||||
foreach_vcpu(i, vm, vcpu) {
|
||||
|
@ -178,11 +178,12 @@ void invept(struct vcpu *vcpu)
|
||||
struct invept_desc desc = {0};
|
||||
|
||||
if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
|
||||
desc.eptp = vcpu->vm->arch_vm.nworld_eptp | (3UL << 3U) | 6UL;
|
||||
desc.eptp = HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
|
||||
(3UL << 3U) | 6UL;
|
||||
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
|
||||
if (vcpu->vm->sworld_control.sworld_enabled &&
|
||||
vcpu->vm->arch_vm.sworld_eptp) {
|
||||
desc.eptp = vcpu->vm->arch_vm.sworld_eptp
|
||||
vcpu->vm->arch_vm.sworld_eptp != NULL) {
|
||||
desc.eptp = HVA2HPA(vcpu->vm->arch_vm.sworld_eptp)
|
||||
| (3UL << 3U) | 6UL;
|
||||
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
}
|
||||
|
||||
if (!vm->sworld_control.sworld_enabled
|
||||
|| vm->arch_vm.sworld_eptp != 0UL) {
|
||||
|| vm->arch_vm.sworld_eptp != NULL) {
|
||||
pr_err("Sworld is not enabled or Sworld eptp is not NULL");
|
||||
return;
|
||||
}
|
||||
@ -108,10 +108,10 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
}
|
||||
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
|
||||
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
|
||||
map_params.pml4_base = vm->arch_vm.nworld_eptp;
|
||||
unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);
|
||||
|
||||
/* Copy PDPT entries from Normal world to Secure world
|
||||
@ -122,7 +122,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
* and Normal World's EPT
|
||||
*/
|
||||
pml4_base = alloc_paging_struct();
|
||||
vm->arch_vm.sworld_eptp = HVA2HPA(pml4_base);
|
||||
vm->arch_vm.sworld_eptp = pml4_base;
|
||||
|
||||
/* The trusty memory is remapped to guest physical address
|
||||
* of gpa_rebased to gpa_rebased + size
|
||||
@ -132,7 +132,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
mem_write64(pml4_base, sworld_pml4e);
|
||||
|
||||
|
||||
nworld_pml4e = mem_read64(HPA2HVA(vm->arch_vm.nworld_eptp));
|
||||
nworld_pml4e = mem_read64(vm->arch_vm.nworld_eptp);
|
||||
|
||||
/*
|
||||
* copy PTPDEs from normal world EPT to secure world EPT,
|
||||
@ -162,8 +162,8 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
IA32E_EPT_WB));
|
||||
|
||||
/* Unmap trusty memory space from sos ept mapping*/
|
||||
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
|
||||
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm0->arch_vm.m2p;
|
||||
/* Get the gpa address in SOS */
|
||||
gpa = hpa2gpa(vm0, hpa);
|
||||
unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
|
||||
@ -199,8 +199,8 @@ void destroy_secure_world(struct vm *vm)
|
||||
|
||||
/* restore memory to SOS ept mapping */
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
|
||||
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm0->arch_vm.m2p;
|
||||
|
||||
map_mem(&map_params, (void *)vm->sworld_control.sworld_memory.base_hpa,
|
||||
(void *)vm->sworld_control.sworld_memory.base_gpa,
|
||||
@ -333,10 +333,12 @@ void switch_world(struct vcpu *vcpu, int next_world)
|
||||
/* load EPTP for next world */
|
||||
if (next_world == NORMAL_WORLD) {
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL,
|
||||
vcpu->vm->arch_vm.nworld_eptp | (3UL<<3) | 6UL);
|
||||
HVA2HPA(vcpu->vm->arch_vm.nworld_eptp) |
|
||||
(3UL<<3) | 6UL);
|
||||
} else {
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL,
|
||||
vcpu->vm->arch_vm.sworld_eptp | (3UL<<3) | 6UL);
|
||||
HVA2HPA(vcpu->vm->arch_vm.sworld_eptp) |
|
||||
(3UL<<3) | 6UL);
|
||||
}
|
||||
|
||||
/* Update world index */
|
||||
@ -472,7 +474,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
|
||||
trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;
|
||||
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL,
|
||||
vm->arch_vm.sworld_eptp | (3UL<<3) | 6UL);
|
||||
HVA2HPA(vm->arch_vm.sworld_eptp) | (3UL<<3) | 6UL);
|
||||
|
||||
/* save Normal World context */
|
||||
save_world_ctx(&vcpu->arch_vcpu.contexts[NORMAL_WORLD]);
|
||||
|
@ -16,8 +16,8 @@ uint64_t gpa2hpa_for_trusty(struct vm *vm, uint64_t gpa)
|
||||
struct map_params map_params;
|
||||
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_base = vm->arch_vm.sworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
obtain_last_page_table_entry(&map_params, &entry, (void *)gpa, true);
|
||||
if (entry.entry_present == PT_PRESENT) {
|
||||
hpa = ((entry.entry_val & (~(entry.page_size - 1UL)))
|
||||
@ -75,7 +75,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
}
|
||||
|
||||
if (!vm->sworld_control.sworld_enabled
|
||||
|| vm->arch_vm.sworld_eptp != 0UL) {
|
||||
|| vm->arch_vm.sworld_eptp != NULL) {
|
||||
pr_err("Sworld is not enabled or Sworld eptp is not NULL");
|
||||
return;
|
||||
}
|
||||
@ -97,7 +97,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
* and Normal World's EPT
|
||||
*/
|
||||
pml4_base = alloc_paging_struct();
|
||||
vm->arch_vm.sworld_eptp = HVA2HPA(pml4_base);
|
||||
vm->arch_vm.sworld_eptp = pml4_base;
|
||||
|
||||
/* The trusty memory is remapped to guest physical address
|
||||
* of gpa_rebased to gpa_rebased + size
|
||||
@ -108,7 +108,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
IA32E_EPT_X_BIT;
|
||||
mem_write64(pml4_base, sworld_pml4e);
|
||||
|
||||
nworld_pml4e = mem_read64(HPA2HVA(vm->arch_vm.nworld_eptp));
|
||||
nworld_pml4e = mem_read64(vm->arch_vm.nworld_eptp);
|
||||
(void)memcpy_s(HPA2HVA(sworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE,
|
||||
HPA2HVA(nworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE);
|
||||
|
||||
@ -116,8 +116,8 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
|
||||
while (size > 0) {
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_base = vm->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
obtain_last_page_table_entry(&map_params, &entry,
|
||||
(void *)gpa_uos, true);
|
||||
mod = (gpa_uos % entry.page_size);
|
||||
@ -131,7 +131,7 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
(void *)gpa_uos, adjust_size, 0U);
|
||||
|
||||
/* Map to secure world */
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
|
||||
map_params.pml4_base = vm->arch_vm.sworld_eptp;
|
||||
map_mem(&map_params, (void *)hpa,
|
||||
(void *)gpa_rebased, adjust_size,
|
||||
(IA32E_EPT_R_BIT |
|
||||
@ -140,8 +140,8 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
||||
IA32E_EPT_WB));
|
||||
|
||||
/* Unmap trusty memory space from sos ept mapping*/
|
||||
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
|
||||
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm0->arch_vm.m2p;
|
||||
/* Get the gpa address in SOS */
|
||||
gpa_sos = hpa2gpa(vm0, hpa);
|
||||
|
||||
@ -183,8 +183,8 @@ void destroy_secure_world(struct vm *vm)
|
||||
map_params.page_table_type = PTT_EPT;
|
||||
while (size > 0) {
|
||||
/* clear trusty memory space */
|
||||
map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
|
||||
map_params.pml4_base = vm->arch_vm.sworld_eptp;
|
||||
map_params.pml4_inverted = vm->arch_vm.m2p;
|
||||
obtain_last_page_table_entry(&map_params, &entry,
|
||||
(void *)gpa, true);
|
||||
hpa = gpa2hpa_for_trusty(vm, gpa);
|
||||
@ -195,8 +195,8 @@ void destroy_secure_world(struct vm *vm)
|
||||
|
||||
(void)memset(HPA2HVA(hpa), 0, adjust_size);
|
||||
/* restore memory to SOS ept mapping */
|
||||
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
|
||||
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
|
||||
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
|
||||
map_params.pml4_inverted = vm0->arch_vm.m2p;
|
||||
/* here gpa=hpa because sos 1:1 mapping
|
||||
* this is a temp solution
|
||||
*/
|
||||
|
@ -1344,7 +1344,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
|
||||
* TODO: introduce API to make this data driven based
|
||||
* on VMX_EPT_VPID_CAP
|
||||
*/
|
||||
value64 = vm->arch_vm.nworld_eptp | (3UL << 3U) | 6UL;
|
||||
value64 = HVA2HPA(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
|
||||
pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);
|
||||
|
||||
|
@ -616,14 +616,14 @@ int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
|
||||
|
||||
/* create a iommu domain for target VM if not created */
|
||||
if (target_vm->iommu_domain == NULL) {
|
||||
if (target_vm->arch_vm.nworld_eptp == 0UL) {
|
||||
if (target_vm->arch_vm.nworld_eptp == NULL) {
|
||||
pr_err("%s, EPT of VM not set!\n",
|
||||
__func__, target_vm->attr.id);
|
||||
return -EPERM;
|
||||
}
|
||||
/* TODO: how to get vm's address width? */
|
||||
target_vm->iommu_domain = create_iommu_domain(vmid,
|
||||
target_vm->arch_vm.nworld_eptp, 48U);
|
||||
HVA2HPA(target_vm->arch_vm.nworld_eptp), 48U);
|
||||
if (target_vm->iommu_domain == NULL) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ int64_t hcall_world_switch(struct vcpu *vcpu)
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (vcpu->vm->arch_vm.sworld_eptp == 0U) {
|
||||
if (vcpu->vm->arch_vm.sworld_eptp == NULL) {
|
||||
pr_err("%s, Trusty is not initialized!\n", __func__);
|
||||
return -EPERM;
|
||||
}
|
||||
@ -44,7 +44,7 @@ int64_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (vcpu->vm->arch_vm.sworld_eptp != 0U) {
|
||||
if (vcpu->vm->arch_vm.sworld_eptp != NULL) {
|
||||
pr_err("%s, Trusty already initialized!\n", __func__);
|
||||
return -EPERM;
|
||||
}
|
||||
|
@ -87,13 +87,13 @@ enum vm_state {
|
||||
struct vm_arch {
|
||||
uint64_t guest_init_pml4;/* Guest init pml4 */
|
||||
/* EPT hierarchy for Normal World */
|
||||
uint64_t nworld_eptp;
|
||||
void *nworld_eptp;
|
||||
/* EPT hierarchy for Secure World
|
||||
* Secure world can access Normal World's memory,
|
||||
* but Normal World can not access Secure World's memory.
|
||||
*/
|
||||
uint64_t sworld_eptp;
|
||||
uint64_t m2p; /* machine address to guest physical address */
|
||||
void *sworld_eptp;
|
||||
void *m2p; /* machine address to guest physical address */
|
||||
void *tmp_pg_array; /* Page array for tmp guest paging struct */
|
||||
void *iobitmap[2];/* IO bitmap page array base address for this VM */
|
||||
void *msr_bitmap; /* MSR bitmap page base address for this VM */
|
||||
|
Loading…
Reference in New Issue
Block a user