ept: change eptp to PA

The eptp should be recorded as a PA (host physical address).

This patch changes nworld_eptp, sworld_eptp and the m2p eptp to the PA type;
the necessary HPA2HVA/HVA2HPA translations are applied wherever these fields
are used after the change.

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Jason Chen CJ 2018-04-03 22:25:40 +08:00 committed by lijinxia
parent 50630c2e4b
commit d937dd09df
7 changed files with 41 additions and 42 deletions

View File

@ -138,11 +138,11 @@ void free_ept_mem(void *pml4_addr)
void destroy_ept(struct vm *vm)
{
free_ept_mem(vm->arch_vm.nworld_eptp);
free_ept_mem(vm->arch_vm.m2p);
free_ept_mem(HPA2HVA(vm->arch_vm.nworld_eptp));
free_ept_mem(HPA2HVA(vm->arch_vm.m2p));
/* Destroy Secure world ept */
if (vm->sworld_control.sworld_enabled)
free_ept_mem(vm->arch_vm.sworld_eptp);
free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
}
uint64_t gpa2hpa_check(struct vm *vm, uint64_t gpa,
@ -154,8 +154,8 @@ uint64_t gpa2hpa_check(struct vm *vm, uint64_t gpa,
struct map_params map_params;
map_params.page_table_type = PTT_EPT;
map_params.pml4_base = vm->arch_vm.nworld_eptp;
map_params.pml4_inverted = vm->arch_vm.m2p;
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
obtain_last_page_table_entry(&map_params, &entry,
(void *)gpa, true);
if (entry.entry_present == PT_PRESENT
@ -193,8 +193,8 @@ uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)
struct map_params map_params;
map_params.page_table_type = PTT_EPT;
map_params.pml4_base = vm->arch_vm.nworld_eptp;
map_params.pml4_inverted = vm->arch_vm.m2p;
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
obtain_last_page_table_entry(&map_params, &entry,
(void *)hpa, false);
@ -544,14 +544,13 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
/* Setup memory map parameters */
map_params.page_table_type = PTT_EPT;
if (vm->arch_vm.nworld_eptp) {
map_params.pml4_base = vm->arch_vm.nworld_eptp;
map_params.pml4_inverted = vm->arch_vm.m2p;
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
} else {
map_params.pml4_base =
alloc_paging_struct();
vm->arch_vm.nworld_eptp = map_params.pml4_base;
map_params.pml4_base = alloc_paging_struct();
vm->arch_vm.nworld_eptp = HVA2HPA(map_params.pml4_base);
map_params.pml4_inverted = alloc_paging_struct();
vm->arch_vm.m2p = map_params.pml4_inverted;
vm->arch_vm.m2p = HVA2HPA(map_params.pml4_inverted);
}
if (type == MAP_MEM || type == MAP_MMIO) {

View File

@ -140,11 +140,10 @@ void invept(struct vcpu *vcpu)
struct invept_desc desc = {0};
if (check_invept_single_support()) {
desc.eptp = (uint64_t) vcpu->vm->arch_vm.nworld_eptp
| (3 << 3) | 6;
desc.eptp = vcpu->vm->arch_vm.nworld_eptp | (3 << 3) | 6;
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
if (vcpu->vm->sworld_control.sworld_enabled) {
desc.eptp = (uint64_t) vcpu->vm->arch_vm.sworld_eptp
desc.eptp = vcpu->vm->arch_vm.sworld_eptp
| (3 << 3) | 6;
_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);

View File

@ -107,11 +107,11 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
uint64_t table_present = (IA32E_EPT_R_BIT |
IA32E_EPT_W_BIT |
IA32E_EPT_X_BIT);
void *sub_table_addr = NULL;
void *sub_table_addr = NULL, *pml4_base = NULL;
struct vm *vm0 = get_vm_from_vmid(0);
if (!vm->sworld_control.sworld_enabled
|| vm->arch_vm.sworld_eptp != NULL) {
|| vm->arch_vm.sworld_eptp != 0) {
pr_err("Sworld is not enabled or Sworld eptp is not NULL");
return;
}
@ -123,10 +123,10 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
}
map_params.page_table_type = PTT_EPT;
map_params.pml4_inverted = vm->arch_vm.m2p;
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
map_params.pml4_base = vm->arch_vm.nworld_eptp;
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0);
/* Copy PDPT entries from Normal world to Secure world
@ -136,24 +136,25 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
* Normal World.PD/PT are shared in both Secure world's EPT
* and Normal World's EPT
*/
vm->arch_vm.sworld_eptp = alloc_paging_struct();
pml4_base = alloc_paging_struct();
vm->arch_vm.sworld_eptp = HVA2HPA(pml4_base);
/* The trusty memory is remapped to guest physical address
* of gpa_rebased to gpa_rebased + size
*/
sub_table_addr = alloc_paging_struct();
sworld_pml4e = HVA2HPA(sub_table_addr)
| table_present;
MEM_WRITE64(vm->arch_vm.sworld_eptp, sworld_pml4e);
sworld_pml4e = HVA2HPA(sub_table_addr) | table_present;
MEM_WRITE64(pml4_base, sworld_pml4e);
nworld_pml4e = MEM_READ64(vm->arch_vm.nworld_eptp);
nworld_pml4e = MEM_READ64(HPA2HVA(vm->arch_vm.nworld_eptp));
memcpy_s(HPA2HVA(sworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE,
HPA2HVA(nworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE);
/* Map gpa_rebased~gpa_rebased+size
* to secure ept mapping
*/
map_params.pml4_base = vm->arch_vm.sworld_eptp;
map_params.pml4_base = pml4_base;
map_mem(&map_params, (void *)hpa,
(void *)gpa_rebased, size,
(MMU_MEM_ATTR_READ |
@ -162,8 +163,8 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
MMU_MEM_ATTR_WB_CACHE));
/* Unmap trusty memory space from sos ept mapping*/
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
map_params.pml4_inverted = vm0->arch_vm.m2p;
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
/* Get the gpa address in SOS */
gpa = hpa2gpa(vm0, hpa);
unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
@ -190,8 +191,8 @@ void destroy_secure_world(struct vm *vm)
/* restore memory to SOS ept mapping */
map_params.page_table_type = PTT_EPT;
map_params.pml4_base = vm0->arch_vm.nworld_eptp;
map_params.pml4_inverted = vm0->arch_vm.m2p;
map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
map_mem(&map_params, (void *)vm->sworld_control.sworld_memory.base_hpa,
(void *)vm->sworld_control.sworld_memory.base_gpa,
@ -313,10 +314,10 @@ void switch_world(struct vcpu *vcpu, int next_world)
/* load EPTP for next world */
if (next_world == NORMAL_WORLD) {
exec_vmwrite64(VMX_EPT_POINTER_FULL,
((uint64_t)vcpu->vm->arch_vm.nworld_eptp) | (3<<3) | 6);
vcpu->vm->arch_vm.nworld_eptp | (3<<3) | 6);
} else {
exec_vmwrite64(VMX_EPT_POINTER_FULL,
((uint64_t)vcpu->vm->arch_vm.sworld_eptp) | (3<<3) | 6);
vcpu->vm->arch_vm.sworld_eptp | (3<<3) | 6);
}
/* Update world index */
@ -430,7 +431,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;
exec_vmwrite64(VMX_EPT_POINTER_FULL,
((uint64_t)vm->arch_vm.sworld_eptp) | (3<<3) | 6);
vm->arch_vm.sworld_eptp | (3<<3) | 6);
/* save Normal World context */
save_world_ctx(&vcpu->arch_vcpu.contexts[NORMAL_WORLD]);

View File

@ -972,7 +972,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
* TODO: introduce API to make this data driven based
* on VMX_EPT_VPID_CAP
*/
value64 = ((uint64_t) vm->arch_vm.nworld_eptp) | (3 << 3) | 6;
value64 = vm->arch_vm.nworld_eptp | (3 << 3) | 6;
exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);

View File

@ -176,7 +176,7 @@ struct iommu_domain {
uint16_t dom_id;
int vm_id;
uint32_t addr_width; /* address width of the domain */
void *trans_table_ptr;
uint64_t trans_table_ptr;
};
static struct list_head dmar_drhd_units;
@ -522,7 +522,7 @@ static struct iommu_domain *create_host_domain(void)
domain->is_host = true;
domain->dom_id = alloc_domain_id();
/* dmar uint need to support translation passthrough */
domain->trans_table_ptr = NULL;
domain->trans_table_ptr = 0;
domain->addr_width = 48;
return domain;
@ -853,7 +853,7 @@ static void dmar_disable(struct dmar_drhd_rt *dmar_uint)
dmar_fault_event_mask(dmar_uint);
}
struct iommu_domain *create_iommu_domain(int vm_id, void *translation_table,
struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,
int addr_width)
{
struct iommu_domain *domain;
@ -1023,7 +1023,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
upper = DMAR_SET_BITSLICE(upper, CTX_ENTRY_UPPER_DID, domain->dom_id);
lower = DMAR_SET_BITSLICE(lower, CTX_ENTRY_LOWER_SLPTPTR,
(uint64_t)domain->trans_table_ptr >> 12);
domain->trans_table_ptr >> 12);
lower = DMAR_SET_BITSLICE(lower, CTX_ENTRY_LOWER_P, 1);
context_entry->upper = upper;

View File

@ -115,13 +115,13 @@ struct vm_state_info {
struct vm_arch {
void *guest_pml4; /* Guest pml4 */
/* EPT hierarchy for Normal World */
void *nworld_eptp;
uint64_t nworld_eptp;
/* EPT hierarchy for Secure World
* Secure world can access Normal World's memory,
* but Normal World can not access Secure World's memory.
*/
void *sworld_eptp;
void *m2p; /* machine address to guest physical address */
uint64_t sworld_eptp;
uint64_t m2p; /* machine address to guest physical address */
void *tmp_pg_array; /* Page array for tmp guest paging struct */
void *iobitmap[2];/* IO bitmap page array base address for this VM */
void *msr_bitmap; /* MSR bitmap page base address for this VM */

View File

@ -242,7 +242,7 @@ int unassign_iommu_device(struct iommu_domain *domain,
/* Create a iommu domain for a VM specified by vm_id */
struct iommu_domain *create_iommu_domain(int vm_id,
void *translation_table, int addr_width);
uint64_t translation_table, int addr_width);
/* Destroy the iommu domain */
int destroy_iommu_domain(struct iommu_domain *domain);