hv: refine create/destroy secure world ept api

Refine create/destroy secure world EPT handling to cope with
discontinuous HPA: instead of requiring the Trusty memory region to be
physically contiguous, walk the GPA range chunk by chunk, sizing each
step by the EPT entry that backs it.

Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
commit c0fbe48037 (parent 2bb69c148e)
Author: Mingqiang Chi <mingqiang.chi@intel.com>
Date: 2018-06-14 20:27:33 +08:00
Committed by: Jack Ren

12 changed files with 119 additions and 93 deletions

@@ -107,7 +107,7 @@ void destroy_ept(struct vm *vm)
 		free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
 }
-uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
+uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size, bool nworld)
 {
 	uint64_t hpa = 0;
 	uint32_t pg_size = 0;
@@ -115,7 +115,8 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 	struct map_params map_params;
 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
+	map_params.pml4_base = (nworld) ? HPA2HVA(vm->arch_vm.nworld_eptp)
+		: HPA2HVA(vm->arch_vm.sworld_eptp);
 	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
 	obtain_last_page_table_entry(&map_params, &entry, (void *)gpa, true);
 	if (entry.entry_present == PT_PRESENT) {
@@ -135,9 +136,9 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 }
 /* using return value 0 as failure, make sure guest will not use hpa 0 */
-uint64_t gpa2hpa(struct vm *vm, uint64_t gpa)
+uint64_t gpa2hpa(struct vm *vm, uint64_t gpa, bool nworld)
 {
-	return _gpa2hpa(vm, gpa, NULL);
+	return _gpa2hpa(vm, gpa, NULL, nworld);
 }
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa)
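
The new nworld flag only selects which EPT root the translation walks.
A minimal illustration of the two call shapes (a fragment, not tree
code; vm, gpa and pr_err come from the surrounding sources):

	uint32_t pg_size;
	uint64_t hpa;

	/* true: translate through the normal-world EPT (nworld_eptp) */
	hpa = _gpa2hpa(vm, gpa, &pg_size, true);
	if (hpa == 0UL)
		pr_err("gpa 0x%016llx unmapped in normal world", gpa);

	/* false: translate through the secure-world EPT (sworld_eptp);
	 * meaningful only after create_secure_world_ept() has built it */
	hpa = _gpa2hpa(vm, gpa, &pg_size, false);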

@@ -115,7 +115,7 @@ int copy_from_vm(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
 	void *g_ptr;
 	do {
-		hpa = _gpa2hpa(vm, gpa, &pg_size);
+		hpa = _gpa2hpa(vm, gpa, &pg_size, true);
 		if (pg_size == 0) {
 			ASSERT(0, "copy_from_vm: GPA2HPA not found");
 			return -EINVAL;
@@ -144,7 +144,7 @@ int copy_to_vm(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
 	void *g_ptr;
 	do {
-		hpa = _gpa2hpa(vm, gpa, &pg_size);
+		hpa = _gpa2hpa(vm, gpa, &pg_size, true);
 		if (pg_size == 0) {
 			ASSERT(0, "copy_to_vm: GPA2HPA not found");
 			return -EINVAL;
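
Both call sites pass true because buffers handed to copy_from_vm() and
copy_to_vm() are normal-world guest addresses. The do/while above
re-translates at every page precisely because consecutive GPAs may be
backed by scattered HPAs. A standalone sketch of that page-chunked copy
pattern (names invented; gpa_to_hva stands in for _gpa2hpa() plus
HPA2HVA(), and page sizes are assumed to be powers of two):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef void *(*gpa_to_hva_fn)(uint64_t gpa, uint32_t *pg_size);

static int copy_from_guest(void *dst, uint64_t gpa, uint32_t size,
		gpa_to_hva_fn gpa_to_hva)
{
	while (size > 0) {
		uint32_t pg_size;
		void *hva = gpa_to_hva(gpa, &pg_size);

		if (hva == NULL || pg_size == 0)
			return -1;	/* translation failed */

		/* copy no further than the end of the backing page */
		uint32_t in_page = pg_size - (uint32_t)(gpa & (pg_size - 1));
		uint32_t len = (size < in_page) ? size : in_page;

		memcpy(dst, hva, len);
		dst = (uint8_t *)dst + len;
		gpa += len;
		size -= len;
	}
	return 0;
}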

@@ -643,25 +643,6 @@ void free_paging_struct(void *ptr)
 	}
 }
-bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size)
-{
-	uint64_t curr_hpa = 0;
-	uint64_t next_hpa = 0;
-	/* if size <= PAGE_SIZE_4K, it is continuous,no need check
-	 * if size > PAGE_SIZE_4K, need to fetch next page
-	 */
-	while (size > PAGE_SIZE_4K) {
-		curr_hpa = gpa2hpa(vm, gpa);
-		gpa += PAGE_SIZE_4K;
-		next_hpa = gpa2hpa(vm, gpa);
-		if (next_hpa != (curr_hpa + PAGE_SIZE_4K))
-			return false;
-		size -= PAGE_SIZE_4K;
-	}
-	return true;
-}
 uint64_t config_page_table_attr(struct map_params *map_params, uint32_t flags)
 {
 	int table_type = map_params->page_table_type;
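
check_continuous_hpa() stepped through a region in 4K increments and
failed as soon as two neighbouring guest pages were not backed by
adjacent host pages; create_secure_world_ept() used it to reject
fragmented Trusty memory outright. With the chunked remap loop added in
trusty.c below, fragmentation is handled rather than rejected, so the
helper and its ASSERT can go. An invented layout the old check would
have refused:

	/* gpa 0x1000 -> hpa 0x40000
	 * gpa 0x2000 -> hpa 0x9f000   (!= 0x40000 + 4K, so return false)
	 * gpa 0x3000 -> hpa 0xa0000
	 */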

hypervisor/arch/x86/mtrr.c (Executable file → Normal file)
@@ -154,7 +154,7 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 		attr = MMU_MEM_ATTR_UNCACHED;
 	}
-	ept_update_mt(vm, gpa2hpa(vm, start), start, size, attr);
+	ept_update_mt(vm, gpa2hpa(vm, start, true), start, size, attr);
 	return attr;
 }

@@ -50,19 +50,20 @@ static struct key_info g_key_info = {
 }
 static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
-		uint64_t size, uint64_t gpa_rebased)
+		int64_t size, uint64_t gpa_rebased)
 {
 	uint64_t nworld_pml4e = 0;
 	uint64_t sworld_pml4e = 0;
+	struct entry_params entry;
 	struct map_params map_params;
-	uint64_t gpa = 0;
-	uint64_t hpa = gpa2hpa(vm, gpa_orig);
-	uint64_t table_present = (IA32E_EPT_R_BIT |
-			IA32E_EPT_W_BIT |
-			IA32E_EPT_X_BIT);
+	uint64_t gpa_uos = gpa_orig;
+	uint64_t gpa_sos;
+	uint64_t adjust_size;
+	uint64_t mod;
+	uint64_t hpa = gpa2hpa(vm, gpa_uos, true);
 	void *sub_table_addr = NULL, *pml4_base = NULL;
-	struct vm *vm0 = get_vm_from_vmid(0);
 	int i;
+	struct vm *vm0 = get_vm_from_vmid(0);
 	struct vcpu *vcpu;
 	if (vm0 == NULL) {
@@ -76,18 +77,13 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 		return;
 	}
-	/* Check the physical address should be continuous */
-	if (!check_continuous_hpa(vm, gpa_orig, size)) {
-		ASSERT(false, "The physical addr is not continuous for Trusty");
-		return;
-	}
-	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
-	/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
-	map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-	unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0);
+	/* Backup secure world info, will be used when
+	 * destroy secure world */
+	vm->sworld_control.sworld_memory.base_gpa = gpa_rebased;
+	vm->sworld_control.sworld_memory.base_hpa = hpa;
+	vm->sworld_control.sworld_memory.length = size;
+	vm->sworld_control.sworld_memory.gpa_sos = hpa2gpa(vm0, hpa);
+	vm->sworld_control.sworld_memory.gpa_uos = gpa_orig;
 	/* Copy PDPT entries from Normal world to Secure world
 	 * Secure world can access Normal World's memory,
@@ -103,37 +99,54 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 	 * of gpa_rebased to gpa_rebased + size
 	 */
 	sub_table_addr = alloc_paging_struct();
-	sworld_pml4e = HVA2HPA(sub_table_addr) | table_present;
+	sworld_pml4e = HVA2HPA(sub_table_addr) | IA32E_EPT_R_BIT |
+			IA32E_EPT_W_BIT |
+			IA32E_EPT_X_BIT;
 	MEM_WRITE64(pml4_base, sworld_pml4e);
 	nworld_pml4e = MEM_READ64(HPA2HVA(vm->arch_vm.nworld_eptp));
 	memcpy_s(HPA2HVA(sworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE,
 			HPA2HVA(nworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE);
-	/* Map gpa_rebased~gpa_rebased+size
-	 * to secure ept mapping
-	 */
-	map_params.pml4_base = pml4_base;
-	map_mem(&map_params, (void *)hpa,
-			(void *)gpa_rebased, size,
+	/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
+	map_params.page_table_type = PTT_EPT;
+	while (size > 0) {
+		map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
+		map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+		obtain_last_page_table_entry(&map_params, &entry,
+				(void *)gpa_uos, true);
+		mod = (gpa_uos % entry.page_size);
+		adjust_size = (mod) ? (entry.page_size - mod) : entry.page_size;
+		if ((uint64_t)size < entry.page_size)
+			adjust_size = size;
+		hpa = gpa2hpa(vm, gpa_uos, true);
+		/* Unmap from normal world */
+		unmap_mem(&map_params, (void *)hpa,
+				(void *)gpa_uos, adjust_size, 0);
+		/* Map to secure world */
+		map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
+		map_mem(&map_params, (void *)hpa,
+				(void *)gpa_rebased, adjust_size,
 			(MMU_MEM_ATTR_READ |
 			MMU_MEM_ATTR_WRITE |
 			MMU_MEM_ATTR_EXECUTE |
 			MMU_MEM_ATTR_WB_CACHE));
-	/* Unmap trusty memory space from sos ept mapping*/
-	map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
-	/* Get the gpa address in SOS */
-	gpa = hpa2gpa(vm0, hpa);
-	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
+		/* Unmap trusty memory space from sos ept mapping*/
+		map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
+		map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+		/* Get the gpa address in SOS */
+		gpa_sos = hpa2gpa(vm0, hpa);
-	/* Backup secure world info, will be used when
-	 * destroy secure world */
-	vm->sworld_control.sworld_memory.base_gpa = gpa;
-	vm->sworld_control.sworld_memory.base_hpa = hpa;
-	vm->sworld_control.sworld_memory.length = size;
+		unmap_mem(&map_params, (void *)hpa,
+				(void *)gpa_sos, adjust_size, 0);
+		gpa_uos += adjust_size;
+		size -= adjust_size;
+		gpa_rebased += adjust_size;
+	}
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
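
The step computation is the heart of the loop: from the current GPA,
advance to the end of the EPT page backing it (a whole page when
already aligned), capped by what remains, so no chunk ever crosses a
boundary of its backing entry and each unmap_mem()/map_mem() call sees
one physically contiguous run. A standalone run of that arithmetic with
made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size = 0x200000;   /* backing entry is a 2M page */
	uint64_t gpa_uos = 0x12345000;   /* 0x145000 bytes into that page */
	int64_t size = 0x800000;         /* 8M still to remap */

	uint64_t mod = gpa_uos % page_size;
	uint64_t adjust_size = (mod) ? (page_size - mod) : page_size;

	if ((uint64_t)size < page_size)
		adjust_size = size;

	/* prints 0xbb000: the bytes up to the next 2M boundary */
	printf("adjust_size = 0x%llx\n", (unsigned long long)adjust_size);
	return 0;
}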
@@ -143,34 +156,62 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
 }
 void destroy_secure_world(struct vm *vm)
 {
 	struct map_params map_params;
+	struct entry_params entry;
 	struct vm *vm0 = get_vm_from_vmid(0);
+	uint64_t hpa;
+	uint64_t mod;
 	int i;
 	struct vcpu *vcpu;
+	uint64_t adjust_size;
+	uint64_t gpa = vm->sworld_control.sworld_memory.base_gpa;
+	int64_t size = (int64_t)vm->sworld_control.sworld_memory.length;
 	if (vm0 == NULL) {
 		pr_err("Parse vm0 context failed.");
 		return;
 	}
-	/* clear trusty memory space */
-	memset(HPA2HVA(vm->sworld_control.sworld_memory.base_hpa),
-			0, vm->sworld_control.sworld_memory.length);
-	/* restore memory to SOS ept mapping */
 	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
-	map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+	while (size > 0) {
+		/* clear trusty memory space */
+		map_params.pml4_base = HPA2HVA(vm->arch_vm.sworld_eptp);
+		map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
+		obtain_last_page_table_entry(&map_params, &entry,
+				(void *)gpa, true);
+		hpa = gpa2hpa(vm, gpa, false);
+		mod = (hpa % entry.page_size);
+		adjust_size = (mod) ? (entry.page_size - mod) : entry.page_size;
+		if ((uint64_t)size < entry.page_size)
+			adjust_size = size;
-	map_mem(&map_params, (void *)vm->sworld_control.sworld_memory.base_hpa,
-			(void *)vm->sworld_control.sworld_memory.base_gpa,
-			vm->sworld_control.sworld_memory.length,
-			(MMU_MEM_ATTR_READ |
-			MMU_MEM_ATTR_WRITE |
-			MMU_MEM_ATTR_EXECUTE |
-			MMU_MEM_ATTR_WB_CACHE));
+		memset(HPA2HVA(hpa), 0, adjust_size);
+		/* restore memory to SOS ept mapping */
+		map_params.pml4_base = HPA2HVA(vm0->arch_vm.nworld_eptp);
+		map_params.pml4_inverted = HPA2HVA(vm0->arch_vm.m2p);
+		/* here gpa=hpa for sos */
+		map_mem(&map_params, (void *)hpa,
+				(void *)hpa,
+				adjust_size,
+				(MMU_MEM_ATTR_READ |
+				MMU_MEM_ATTR_WRITE |
+				MMU_MEM_ATTR_EXECUTE |
+				MMU_MEM_ATTR_WB_CACHE));
+		size -= adjust_size;
+		gpa += adjust_size;
+	}
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
+	foreach_vcpu(i, vm0, vcpu) {
+		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
+	}
 }
 static void save_world_ctx(struct run_context *context)
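
Two details worth noting in the teardown: the translation uses
nworld == false because create_secure_world_ept() already removed these
GPAs from the normal-world EPT, so only the secure-world tables can
still resolve them; and the memset() that scrubs Trusty secrets moves
inside the loop, since with a discontinuous backing there is no single
[base_hpa, base_hpa + length) span left to clear in one pass.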
@@ -375,7 +416,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
 	uint64_t trusty_entry_gpa, trusty_base_gpa, trusty_base_hpa;
 	struct vm *vm = vcpu->vm;
 	struct trusty_boot_param *boot_param =
-		(struct trusty_boot_param *)(gpa2hpa(vm, param));
+		(struct trusty_boot_param *)(gpa2hpa(vm, param, true));
 	if (sizeof(struct trusty_boot_param) !=
 			boot_param->size_of_this_struct) {
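
Distilled, create and destroy now share one loop shape. A minimal
sketch of it (not hypervisor source; struct vm is left opaque and
ept_page_size_at() is a hypothetical stand-in for the
obtain_last_page_table_entry() lookup):

#include <stdint.h>

struct vm;

/* hypothetical: page size of the EPT entry backing gpa */
uint64_t ept_page_size_at(struct vm *vm, uint64_t gpa);

static void walk_gpa_range(struct vm *vm, uint64_t gpa, int64_t size,
		void (*process)(struct vm *vm, uint64_t gpa, uint64_t len))
{
	while (size > 0) {
		uint64_t page_size = ept_page_size_at(vm, gpa);
		uint64_t mod = gpa % page_size;
		uint64_t step = (mod) ? (page_size - mod) : page_size;

		if ((uint64_t)size < page_size)
			step = size;

		process(vm, gpa, step);	/* e.g. unmap + remap one chunk */
		gpa += step;
		size -= step;
	}
}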

@@ -22,5 +22,4 @@
 #define CONFIG_RAM_START 0x6E000000
 #define CONFIG_RAM_SIZE 0x02000000 /* 32M */
 #define CONFIG_MTRR_ENABLED 1
-#define CONFIG_REMAIN_1G_PAGES 1
 #endif /* BSP_CFG_H */

@@ -310,7 +310,7 @@ int64_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param)
 	dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
 			vmid, iobuf.req_buf);
-	hpa = gpa2hpa(vm, iobuf.req_buf);
+	hpa = gpa2hpa(vm, iobuf.req_buf, true);
 	if (hpa == 0) {
 		pr_err("%s: invalid GPA.\n", __func__);
 		target_vm->sw.io_shared_page = NULL;
@@ -401,7 +401,7 @@ int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 		return -1;
 	}
-	hpa = gpa2hpa(vm, memmap->vm0_gpa);
+	hpa = gpa2hpa(vm, memmap->vm0_gpa, true);
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
 			target_vm->attr.id, memmap->remote_gpa, hpa, memmap->length);
@@ -567,7 +567,7 @@ int64_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
 		pr_err("HCALL gpa2hpa: Unable copy param from vm\n");
 		return -1;
 	}
-	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
+	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa, true);
 	if (copy_to_vm(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa))) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;

@@ -114,7 +114,7 @@ static void dump_guest_stack(struct vcpu *vcpu)
 				cur_context->rsp);
 		return;
 	}
-	hpa = gpa2hpa(vcpu->vm, gpa);
+	hpa = gpa2hpa(vcpu->vm, gpa, true);
 	printf("\r\nGuest Stack:\r\n");
 	printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
 			"gpa 0x%016llx -> hpa 0x%016llx \r\n",
@@ -148,7 +148,7 @@ static void dump_guest_stack(struct vcpu *vcpu)
 		return;
 	}
-	hpa = gpa2hpa(vcpu->vm, gpa);
+	hpa = gpa2hpa(vcpu->vm, gpa, true);
 	printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
 			"gpa 0x%016llx -> hpa 0x%016llx \r\n",
 			vcpu->vcpu_id, cur_context->rsp + page1_size,
@@ -199,7 +199,7 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 		printf("gva2gpa failed for guest bp 0x%016llx\r\n", bp);
 		break;
 	}
-	hpa = gpa2hpa(vcpu->vm, gpa);
+	hpa = gpa2hpa(vcpu->vm, gpa, true);
 	hva = HPA2HVA(hpa);
 	printf("BP_GVA(0x%016llx)->BP_GPA(0x%016llx)"
 			"->BP_HPA(0x%016llx) RIP=0x%016llx\r\n", bp, gpa, hpa,

@@ -729,7 +729,7 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
 	if (status) {
 		shell_puts(p_shell, "Cannot handle user gva yet!\r\n");
 	} else {
-		hpa = gpa2hpa(vm, gpa);
+		hpa = gpa2hpa(vm, gpa, true);
 		snprintf(temp_str, MAX_STR_SIZE,
 				"\r\nDump RSP for vm %d, from "
 				"gva 0x%016llx -> gpa 0x%016llx"
@@ -796,7 +796,7 @@ int shell_vcpu_dumpmem(struct shell *p_shell,
 		shell_puts(p_shell,
 				"Cannot handle user gva yet!\r\n");
 	} else {
-		hpa = gpa2hpa(vcpu->vm, gpa);
+		hpa = gpa2hpa(vcpu->vm, gpa, true);
 		snprintf(temp_str, MAX_STR_SIZE,
 				"Dump memory for vcpu %d, from gva 0x%016llx ->"
 				"gpa 0x%016llx -> hpa 0x%016llx, length "

@@ -376,8 +376,8 @@ extern uint8_t CPU_Boot_Page_Tables_Start_VM[];
 int is_ept_supported(void);
 uint64_t create_guest_initial_paging(struct vm *vm);
 void destroy_ept(struct vm *vm);
-uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
-uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t gpa2hpa(struct vm *vm, uint64_t gpa, bool nworld);
+uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size, bool nworld);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 int ept_mmap(struct vm *vm, uint64_t hpa,
 		uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot);
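
Assumed contracts for the changed prototypes, restated from the ept.c
hunk above (an editorial summary, not kerneldoc from the tree):

/*
 * gpa2hpa(vm, gpa, nworld)
 *	Translate a guest-physical address through the VM's normal-world
 *	EPT (nworld == true) or secure-world EPT (nworld == false).
 *	Returns 0 on failure, so HPA 0 must never be handed to a guest.
 *
 * _gpa2hpa(vm, gpa, size, nworld)
 *	Same translation; additionally reports the backing page size
 *	through *size (0 when the entry is not present). size may be NULL.
 */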

@@ -91,12 +91,16 @@ struct key_info {
 };
 struct secure_world_memory {
-	/* The secure world base address of GPA in SOS */
+	/* The secure world base address of GPA for secure world*/
 	uint64_t base_gpa;
 	/* The secure world base address of HPA */
 	uint64_t base_hpa;
 	/* Secure world runtime memory size */
 	uint64_t length;
+	/* The secure world base address of GPA in SOS */
+	uint64_t gpa_sos;
+	/* The secure world base address of GPA in UOS for normal world */
+	uint64_t gpa_uos;
 };
 struct secure_world_control {
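
A hypothetical snapshot of the bookkeeping right after
create_secure_world_ept(vm, gpa_orig, size, gpa_rebased) (all values
invented for illustration; in the SOS, GPA equals HPA):

	/* base_gpa = 0x10000000  rebased GPA the secure world runs at
	 * base_hpa = 0x45000000  HPA backing the first chunk
	 * length   = 0x01000000  16M of runtime memory
	 * gpa_sos  = 0x45000000  same range seen from the SOS (1:1)
	 * gpa_uos  = 0x70000000  original normal-world GPA in the UOS
	 */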

@@ -35,7 +35,7 @@
 #define HPA2HVA(x) ((void *)(x))
 #define HVA2HPA(x) ((uint64_t)(x))
 /* gpa --> hpa -->hva */
-#define GPA2HVA(vm, x) HPA2HVA(gpa2hpa(vm, x))
+#define GPA2HVA(vm, x) HPA2HVA(gpa2hpa(vm, x, true))
 #endif /* !ASSEMBLER */
 #endif /* HYPERVISOR_H */
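
The macro bakes in nworld == true, so GPA2HVA() is only valid for
normal-world addresses. A secure-world variant, if one were wanted,
would just flip the flag (hypothetical, not part of this commit):

#define GPA2HVA_SWORLD(vm, x) HPA2HVA(gpa2hpa(vm, x, false))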