Mirror of https://github.com/projectacrn/acrn-hypervisor.git (last synced 2025-09-22 17:27:53 +00:00).
hv: ept: unify EPT API name to verb-object style

Rename ept_mr_add to ept_add_mr, rename ept_mr_modify to ept_modify_mr, and rename ept_mr_del to ept_del_mr.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
Committed by: ACRN System Integration
Parent commit: 4add405978
This commit: 9960ff98c5
@@ -97,7 +97,7 @@ int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
|
||||
return status;
|
||||
}
|
||||
|
||||
void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
|
||||
{
|
||||
uint16_t i;
|
||||
@@ -122,7 +122,7 @@ void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
}
|
||||
}
|
||||
|
||||
void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
uint64_t gpa, uint64_t size,
|
||||
uint64_t prot_set, uint64_t prot_clr)
|
||||
{
|
||||
@@ -145,7 +145,7 @@ void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page,
|
||||
/**
|
||||
* @pre [gpa,gpa+size) has been mapped into host physical memory region
|
||||
*/
|
||||
void ept_mr_del(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
|
||||
void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
|
||||
{
|
||||
struct acrn_vcpu *vcpu;
|
||||
uint16_t i;
|
||||
|
@@ -67,7 +67,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
|
||||
hpa = gpa2hpa(vm, gpa_orig);
|
||||
|
||||
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
|
||||
ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);
|
||||
ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);
|
||||
|
||||
/* Copy PDPT entries from Normal world to Secure world
|
||||
* Secure world can access Normal World's memory,
|
||||
@@ -109,7 +109,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
|
||||
}
|
||||
|
||||
/* Map [gpa_rebased, gpa_rebased + size) to secure ept mapping */
|
||||
ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
|
||||
ept_add_mr(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
|
||||
|
||||
/* Backup secure world info, will be used when destroy secure world and suspend UOS */
|
||||
vm->sworld_control.sworld_memory.base_gpa_in_uos = gpa_orig;
|
||||
@@ -131,13 +131,13 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
|
||||
clac();
|
||||
}
|
||||
|
||||
ept_mr_del(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
|
||||
ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
|
||||
/* sanitize trusty ept page-structures */
|
||||
sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
|
||||
vm->arch_vm.sworld_eptp = NULL;
|
||||
|
||||
/* Restore memory to guest normal world */
|
||||
ept_mr_add(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
|
||||
ept_add_mr(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
|
||||
} else {
|
||||
pr_err("sworld eptp is NULL, it's not created");
|
||||
}
|
||||
|
@@ -2171,11 +2171,11 @@ int32_t vlapic_create(struct acrn_vcpu *vcpu)
|
||||
(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
|
||||
/* only need unmap it from SOS as UOS never mapped it */
|
||||
if (is_sos_vm(vcpu->vm)) {
|
||||
ept_mr_del(vcpu->vm, pml4_page,
|
||||
ept_del_mr(vcpu->vm, pml4_page,
|
||||
DEFAULT_APIC_BASE, PAGE_SIZE);
|
||||
}
|
||||
|
||||
ept_mr_add(vcpu->vm, pml4_page,
|
||||
ept_add_mr(vcpu->vm, pml4_page,
|
||||
vlapic_apicv_get_apic_access_addr(),
|
||||
DEFAULT_APIC_BASE, PAGE_SIZE,
|
||||
EPT_WR | EPT_RD | EPT_UNCACHED);
|
||||
|
@@ -195,7 +195,7 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
|
||||
|
||||
/* Do EPT mapping for GPAs that are backed by physical memory */
|
||||
if (entry->type == E820_TYPE_RAM) {
|
||||
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
|
||||
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
|
||||
entry->length, EPT_RWX | EPT_WB);
|
||||
|
||||
base_hpa += entry->length;
|
||||
@@ -203,7 +203,7 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
|
||||
|
||||
/* GPAs under 1MB are always backed by physical memory */
|
||||
if ((entry->type != E820_TYPE_RAM) && (entry->baseaddr < (uint64_t)MEM_1M)) {
|
||||
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
|
||||
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
|
||||
entry->length, EPT_RWX | EPT_UNCACHED);
|
||||
|
||||
base_hpa += entry->length;
|
||||
@@ -337,14 +337,14 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
|
||||
}
|
||||
|
||||
/* create real ept map for all ranges with UC */
|
||||
ept_mr_add(vm, pml4_page, p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_bottom,
|
||||
ept_add_mr(vm, pml4_page, p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_bottom,
|
||||
(p_e820_mem_info->mem_top - p_e820_mem_info->mem_bottom), attr_uc);
|
||||
|
||||
/* update ram entries to WB attr */
|
||||
for (i = 0U; i < entries_count; i++) {
|
||||
entry = p_e820 + i;
|
||||
if (entry->type == E820_TYPE_RAM) {
|
||||
ept_mr_modify(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);
|
||||
ept_modify_mr(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -361,19 +361,19 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
|
||||
*/
|
||||
epc_secs = get_phys_epc();
|
||||
for (i = 0U; (i < MAX_EPC_SECTIONS) && (epc_secs[i].size != 0UL); i++) {
|
||||
ept_mr_del(vm, pml4_page, epc_secs[i].base, epc_secs[i].size);
|
||||
ept_del_mr(vm, pml4_page, epc_secs[i].base, epc_secs[i].size);
|
||||
}
|
||||
|
||||
/* unmap hypervisor itself for safety
|
||||
* will cause EPT violation if sos accesses hv memory
|
||||
*/
|
||||
hv_hpa = hva2hpa((void *)(get_hv_image_base()));
|
||||
ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
|
||||
ept_del_mr(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
|
||||
/* unmap prelaunch VM memory */
|
||||
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
|
||||
vm_config = get_vm_config(vm_id);
|
||||
if (vm_config->load_order == PRE_LAUNCHED_VM) {
|
||||
ept_mr_del(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
|
||||
ept_del_mr(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -387,7 +387,7 @@ static void prepare_epc_vm_memmap(struct acrn_vm *vm)
|
||||
if (is_vsgx_supported(vm->vm_id)) {
|
||||
vm_epc_maps = get_epc_mapping(vm->vm_id);
|
||||
for (i = 0U; (i < MAX_EPC_SECTIONS) && (vm_epc_maps[i].size != 0UL); i++) {
|
||||
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vm_epc_maps[i].hpa,
|
||||
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vm_epc_maps[i].hpa,
|
||||
vm_epc_maps[i].gpa, vm_epc_maps[i].size, EPT_RWX | EPT_WB);
|
||||
}
|
||||
}
|
||||
@@ -456,7 +456,7 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
|
||||
if (vm->sworld_control.flag.supported != 0UL) {
|
||||
struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
|
||||
|
||||
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
|
||||
ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
|
||||
hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
|
||||
TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
|
||||
}
|
||||
|
@@ -157,7 +157,7 @@ static void update_ept(struct acrn_vm *vm, uint64_t start,
|
||||
break;
|
||||
}
|
||||
|
||||
ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK);
|
||||
ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK);
|
||||
}
|
||||
|
||||
static void update_ept_mem_type(const struct acrn_vmtrr *vmtrr)
|
||||
|
Reference in New Issue
Block a user