hv: trusty: refine trusty memory region mapping

Now it uses the memory reserved by the hypervisor, so there is
no necessity to map or unmap this region from the SOS.

Tracked-On: #1942
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Li, Fei1
2018-11-28 22:32:52 +08:00
committed by wenlingz
parent bd1c0838d1
commit a0582c99cf
5 changed files with 0 additions and 66 deletions
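For context on why these deletions are safe: the Trusty region is now carved out of memory the hypervisor reserves for its own use, so the SOS (vm0) never has it mapped and there is nothing to pull out of, or restore to, the SOS EPT. A simplified before/after sketch of the setup path, using the calls that appear in the diff below (abridged and illustrative, not the exact ACRN code):

/*
 * Before (abridged): the region lived in SOS-visible memory, so setup had to
 * verify HPA continuity and then hide the range from vm0's EPT as well.
 */
if (!check_continuous_hpa(vm, gpa_orig, size)) {
	return;	/* backing pages scattered in HPA space */
}
hpa = gpa2hpa(vm, gpa_orig);
ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp, vm0_hpa2gpa(hpa), size);

/*
 * After (abridged): the region is hypervisor-reserved and never appears in the
 * SOS EPT, so only the UOS-side remapping into the secure world remains.
 */
hpa = gpa2hpa(vm, gpa_orig);
ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);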


@@ -295,27 +295,3 @@ void init_paging(void)
/* set ptep in sanitized_page point to itself */
sanitize_pte((uint64_t *)sanitized_page);
}
bool check_continuous_hpa(struct acrn_vm *vm, uint64_t gpa_arg, uint64_t size_arg)
{
uint64_t curr_hpa;
uint64_t next_hpa;
uint64_t gpa = gpa_arg;
uint64_t size = size_arg;
/* If size <= PAGE_SIZE_4K, it is continuous; no need to check.
 * If size > PAGE_SIZE_4K, fetch and compare the next page.
 */
while (size > PAGE_SIZE_4K) {
curr_hpa = gpa2hpa(vm, gpa);
gpa += PAGE_SIZE_4K;
next_hpa = gpa2hpa(vm, gpa);
if ((curr_hpa == INVALID_HPA) || (next_hpa == INVALID_HPA)
|| (next_hpa != (curr_hpa + PAGE_SIZE_4K))) {
return false;
}
size -= PAGE_SIZE_4K;
}
return true;
}
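The helper deleted above walks the guest-physical range one 4 KiB page at a time and checks that each page's host-physical address immediately follows the previous one. A minimal, hypothetical caller might look like this (the wrapper is illustrative only and assumes the usual ACRN headers for struct acrn_vm, gpa2hpa and INVALID_HPA):

/* Hypothetical wrapper: return the host-physical base of a guest buffer only
 * if the whole buffer is physically contiguous, so that a single GPA-to-HPA
 * translation is valid for the entire range.
 */
static bool get_contiguous_hpa(struct acrn_vm *vm, uint64_t gpa,
			       uint64_t size, uint64_t *hpa_out)
{
	if (!check_continuous_hpa(vm, gpa, size)) {
		return false;	/* backing pages are scattered across HPA space */
	}

	*hpa_out = gpa2hpa(vm, gpa);
	return (*hpa_out != INVALID_HPA);
}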


@@ -62,13 +62,11 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
{
uint64_t nworld_pml4e;
uint64_t sworld_pml4e;
uint64_t gpa;
/* Check the HPA of parameter gpa_orig when invoking check_continuous_hpa */
uint64_t hpa;
uint64_t table_present = EPT_RWX;
uint64_t pdpte, *dest_pdpte_p, *src_pdpte_p;
void *sub_table_addr, *pml4_base;
struct acrn_vm *vm0;
uint16_t i;
if ((vm->sworld_control.flag.supported == 0UL)
@@ -77,15 +75,6 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
return;
}
/**
 * Check that the HPA of parameter gpa_orig exists and that
 * the backing physical addresses are continuous.
 */
if (!check_continuous_hpa(vm, gpa_orig, size)) {
ASSERT(false, "The physical addr is not continuous for Trusty");
return;
}
hpa = gpa2hpa(vm, gpa_orig);
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
@@ -133,15 +122,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
/* Map [gpa_rebased, gpa_rebased + size) to secure ept mapping */
ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
/* Get the gpa address in SOS */
gpa = vm0_hpa2gpa(hpa);
/* Unmap trusty memory space from sos ept mapping */
vm0 = get_vm_from_vmid(0U);
ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp, gpa, size);
/* Back up secure world info; it will be used when destroying the secure world and suspending the UOS */
vm->sworld_control.sworld_memory.base_gpa_in_sos = gpa;
vm->sworld_control.sworld_memory.base_gpa_in_uos = gpa_orig;
vm->sworld_control.sworld_memory.base_hpa = hpa;
vm->sworld_control.sworld_memory.length = size;
@@ -149,9 +130,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
{
struct acrn_vm *vm0;
uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
uint64_t gpa_sos = vm->sworld_control.sworld_memory.base_gpa_in_sos;
uint64_t gpa_uos = vm->sworld_control.sworld_memory.base_gpa_in_uos;
uint64_t size = vm->sworld_control.sworld_memory.length;
@@ -170,13 +149,8 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
vm->arch_vm.sworld_eptp = NULL;
/* restore memory to SOS ept mapping */
vm0 = get_vm_from_vmid(0U);
ept_mr_add(vm0, vm0->arch_vm.nworld_eptp, hpa, gpa_sos, size, EPT_RWX | EPT_WB);
/* Restore memory to guest normal world */
ept_mr_add(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
}
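With the SOS mapping gone, base_gpa_in_sos is no longer written or read in these paths; the bookkeeping that still matters is the host-physical base of the hypervisor-reserved region plus the UOS-side view of it. A sketch of the fields that stay relevant (field names taken from the diff; the struct shape itself is illustrative, not the exact ACRN definition):

/* Illustrative shape of the remaining secure-world memory bookkeeping. */
struct sworld_memory_info {
	uint64_t base_hpa;		/* host-physical base of the hypervisor-reserved region */
	uint64_t base_gpa_in_uos;	/* GPA at which the UOS originally handed the region over */
	uint64_t length;		/* size of the region in bytes */
};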
static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)