hv: mmu: replace the old mmu_del

1. Use ept_mr_del to delete an EPT memory region.
2. Remove unmap_mem.
3. Remove mem_map_request_type.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
parent 236bb10e4d
commit c7799584dc
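In practice the change means callers stop building a struct map_params just to tear a region down and instead call ept_mr_del() against the EPT PML4 page. A condensed before/after sketch, taken from the trusty hunks below (variable names as they appear there):

    /* before: describe the EPT in map_params, then request an unmap */
    map_params.page_table_type = PTT_EPT;
    map_params.pml4_base = vm->arch_vm.nworld_eptp;
    map_params.pml4_inverted = vm->arch_vm.m2p;
    unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);

    /* after: delete the EPT memory region directly */
    ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);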
@@ -32,13 +32,6 @@
 
 static void *mmu_pml4_addr;
 
-enum mem_map_request_type {
-    PAGING_REQUEST_TYPE_MAP = 0,    /* Creates a new mapping. */
-    PAGING_REQUEST_TYPE_UNMAP = 1,  /* Removes a pre-existing entry */
-    /* Modifies a pre-existing entries attributes. */
-    PAGING_REQUEST_TYPE_UNKNOWN,
-};
-
 static struct vmx_capability {
     uint32_t ept;
     uint32_t vpid;
@@ -236,16 +229,14 @@ check_page_table_present(enum _page_table_type page_table_type,
 
 static uint32_t map_mem_region(void *vaddr, void *paddr,
         void *table_base, uint64_t attr_arg, uint32_t table_level,
-        enum _page_table_type table_type,
-        enum mem_map_request_type request_type)
+        enum _page_table_type table_type)
 {
     uint64_t table_entry;
     uint64_t attr = attr_arg;
     uint32_t table_offset;
     uint32_t mapped_size;
 
-    if (table_base == NULL || table_level >= IA32E_UNKNOWN
-        || request_type >= PAGING_REQUEST_TYPE_UNKNOWN) {
+    if (table_base == NULL || table_level >= IA32E_UNKNOWN) {
         /* Shouldn't go here */
         ASSERT(false, "Incorrect Arguments. Failed to map region");
         return 0;
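With the request type gone, map_mem_region() only ever creates or refreshes a mapping, so its signature loses the request_type parameter. An illustrative call after this patch, matching the 4 KByte call site further down in this diff:

    /* map one 4 KByte page; the former request_type argument is gone */
    adjustment_size = map_mem_region(vaddr, paddr,
            table_addr, attr, IA32E_PT, table_type);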
@@ -315,7 +306,6 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
     /* Get current table entry */
     uint64_t entry = mem_read64(table_base + table_offset);
     bool prev_entry_present = false;
-    bool mmu_need_invtlb = false;
 
     switch(check_page_table_present(table_type, entry)) {
     case PT_PRESENT:
@@ -330,53 +320,23 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
         return 0;
     }
 
-    switch (request_type) {
-    case PAGING_REQUEST_TYPE_MAP:
-    {
-        /* No need to confirm current table entry
-         * isn't already present
-         * support map-->remap
-         */
-        table_entry = ((table_type == PTT_EPT)
-                ? attr
-                : (attr | IA32E_COMM_P_BIT));
+    /* No need to confirm current table entry
+     * isn't already present
+     * support map-->remap
+     */
+    table_entry = ((table_type == PTT_EPT)
+            ? attr
+            : (attr | IA32E_COMM_P_BIT));
 
     table_entry |= (uint64_t)paddr;
 
     /* Write the table entry to map this memory */
     mem_write64(table_base + table_offset, table_entry);
 
     /* Invalidate TLB and page-structure cache,
     * if it is the first mapping no need to invalidate TLB
     */
     if ((table_type == PTT_HOST) && prev_entry_present) {
-        mmu_need_invtlb = true;
-    }
-        break;
-    }
-    case PAGING_REQUEST_TYPE_UNMAP:
-    {
-        if (prev_entry_present) {
-            /* Table is present.
-             * Write the table entry to map this memory
-             */
-            mem_write64(table_base + table_offset, 0);
-
-            /* Unmap, need to invalidate TLB and
-             * page-structure cache
-             */
-            if (table_type == PTT_HOST) {
-                mmu_need_invtlb = true;
-            }
-        }
-        break;
-    }
-    default:
-        ASSERT(false, "Bad memory map request type");
-        return 0;
-    }
-
-    if (mmu_need_invtlb) {
         /* currently, all native mmu update is done at BSP,
         * the assumption is that after AP start, there
         * is no mmu update - so we can avoid shootdown issue
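The switch over request types collapses into the single map/remap path: build the entry from the attributes and the physical address, write it, and invalidate only when a host mapping that was already present gets remapped; the mmu_need_invtlb flag disappears and the invalidation now sits directly under that check. Condensed from the new side of this hunk:

    /* map or remap: construct and write the entry */
    table_entry = ((table_type == PTT_EPT)
            ? attr
            : (attr | IA32E_COMM_P_BIT));
    table_entry |= (uint64_t)paddr;
    mem_write64(table_base + table_offset, table_entry);

    if ((table_type == PTT_HOST) && prev_entry_present) {
        /* remap of a present host entry: flush the stale translation
         * (the BSP-only invalidation kept as context in the hunk above) */
    }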
@@ -782,7 +742,7 @@ int obtain_last_page_table_entry(struct map_params *map_params,
 
 static uint64_t update_page_table_entry(struct map_params *map_params,
         void *paddr, void *vaddr, uint64_t size, uint64_t attr,
-        enum mem_map_request_type request_type, bool direct)
+        bool direct)
 {
     uint64_t remaining_size = size;
     uint32_t adjustment_size;
@@ -805,7 +765,7 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
         /* Map this 1 GByte memory region */
         adjustment_size = map_mem_region(vaddr, paddr,
                 table_addr, attr, IA32E_PDPT,
-                table_type, request_type);
+                table_type);
     } else if ((remaining_size >= MEM_2M)
             && (MEM_ALIGNED_CHECK(vaddr, MEM_2M))
             && (MEM_ALIGNED_CHECK(paddr, MEM_2M))) {
@@ -817,8 +777,7 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
         }
         /* Map this 2 MByte memory region */
         adjustment_size = map_mem_region(vaddr, paddr,
-                table_addr, attr, IA32E_PD, table_type,
-                request_type);
+                table_addr, attr, IA32E_PD, table_type);
     } else {
         /* Walk from the PDPT table to the PD table */
         table_addr = walk_paging_struct(vaddr,
@@ -835,7 +794,7 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
         /* Map this 4 KByte memory region */
         adjustment_size = map_mem_region(vaddr, paddr,
                 table_addr, attr, IA32E_PT,
-                table_type, request_type);
+                table_type);
     }
 
     return adjustment_size;
@@ -939,8 +898,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
 }
 
 static int modify_paging(struct map_params *map_params, void *paddr,
-        void *vaddr, uint64_t size, uint32_t flags,
-        enum mem_map_request_type request_type, bool direct)
+        void *vaddr, uint64_t size, uint32_t flags, bool direct)
 {
     int64_t remaining_size;
     uint64_t adjust_size;
@@ -957,10 +915,9 @@ static int modify_paging(struct map_params *map_params, void *paddr,
     vaddr_end = ROUND_PAGE_DOWN(vaddr_end);
     remaining_size = vaddr_end - (uint64_t)vaddr;
 
-    if ((request_type >= PAGING_REQUEST_TYPE_UNKNOWN)
-        || (map_params == NULL)) {
-        pr_err("%s: vaddr=0x%llx size=0x%llx req_type=0x%lx",
-            __func__, vaddr, size, request_type);
+    if (map_params == NULL) {
+        pr_err("%s: vaddr=0x%llx size=0x%llx",
+            __func__, vaddr, size);
         ASSERT(false, "Incorrect Arguments");
         return -EINVAL;
     }
@@ -986,17 +943,8 @@ static int modify_paging(struct map_params *map_params, void *paddr,
                 direct) < 0) {
             return -EINVAL;
         }
-        /* filter the unmap request, no action in this case*/
         page_size = entry.page_size;
-        if ((request_type == PAGING_REQUEST_TYPE_UNMAP)
-            && (entry.entry_present == PT_NOT_PRESENT)) {
-            adjust_size =
-                page_size - ((uint64_t)(vaddr) % page_size);
-            vaddr += adjust_size;
-            paddr += adjust_size;
-            remaining_size -= adjust_size;
-            continue;
-        }
 
         /* if the address is NOT aligned of current page size,
          * or required memory size < page size
@@ -1024,7 +972,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
         }
         /* The function return the memory size that one entry can map */
         adjust_size = update_page_table_entry(map_params, paddr, vaddr,
-                page_size, attr, request_type, direct);
+                page_size, attr, direct);
         if (adjust_size == 0UL) {
             return -EINVAL;
         }
@@ -1042,34 +990,14 @@ int map_mem(struct map_params *map_params, void *paddr, void *vaddr,
     int ret = 0;
 
     /* used for MMU and EPT*/
-    ret = modify_paging(map_params, paddr, vaddr, size, flags,
-            PAGING_REQUEST_TYPE_MAP, true);
+    ret = modify_paging(map_params, paddr, vaddr, size, flags, true);
     if (ret < 0) {
         return ret;
     }
     /* only for EPT */
     if (map_params->page_table_type == PTT_EPT) {
         ret = modify_paging(map_params, vaddr, paddr, size, flags,
-                PAGING_REQUEST_TYPE_MAP, false);
-    }
-    return ret;
-}
-
-int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
-        uint64_t size, uint32_t flags)
-{
-    int ret = 0;
-
-    /* used for MMU and EPT */
-    ret = modify_paging(map_params, paddr, vaddr, size, flags,
-            PAGING_REQUEST_TYPE_UNMAP, true);
-    if (ret < 0) {
-        return ret;
-    }
-    /* only for EPT */
-    if (map_params->page_table_type == PTT_EPT) {
-        ret = modify_paging(map_params, vaddr, paddr, size, flags,
-                PAGING_REQUEST_TYPE_UNMAP, false);
+                false);
     }
     return ret;
 }
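map_mem() survives as the single entry point for creating MMU and EPT mappings; unmap_mem() is removed rather than converted, so removal goes through ept_mr_del() from now on. A hedged sketch of the resulting split (map_mem's prototype is in the header hunk at the end of this diff; hpa, gpa, size and flags here are illustrative placeholders):

    /* create the mapping; covers MMU and, for PTT_EPT, EPT as noted in the code above */
    ret = map_mem(&map_params, (void *)hpa, (void *)gpa, size, flags);

    /* remove an EPT region: operate on the PML4 page directly */
    ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa, size);

The hunks that follow update the trusty/secure-world setup code to this pattern.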
@@ -107,12 +107,9 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
         return;
     }
 
-    map_params.page_table_type = PTT_EPT;
-    map_params.pml4_inverted = vm->arch_vm.m2p;
-
     /* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
-    map_params.pml4_base = vm->arch_vm.nworld_eptp;
-    unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);
+    ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+        gpa_orig, size);
 
     /* Copy PDPT entries from Normal world to Secure world
      * Secure world can access Normal World's memory,
@@ -153,6 +150,8 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
     /* Map gpa_rebased~gpa_rebased+size
      * to secure ept mapping
      */
+    map_params.page_table_type = PTT_EPT;
+    map_params.pml4_inverted = vm->arch_vm.m2p;
     map_params.pml4_base = pml4_base;
     map_mem(&map_params, (void *)hpa,
         (void *)gpa_rebased, size,
@@ -161,12 +160,11 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
                 IA32E_EPT_X_BIT |
                 IA32E_EPT_WB));
 
-    /* Unmap trusty memory space from sos ept mapping*/
-    map_params.pml4_base = vm0->arch_vm.nworld_eptp;
-    map_params.pml4_inverted = vm0->arch_vm.m2p;
     /* Get the gpa address in SOS */
     gpa = hpa2gpa(vm0, hpa);
-    unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
+    /* Unmap trusty memory space from sos ept mapping*/
+    ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp,
+        gpa, size);
 
     /* Backup secure world info, will be used when
      * destroy secure world */
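Both trusty variants keep the same two-step pattern when scrubbing the secure-world pages out of the SOS mapping: translate the host address to the SOS guest-physical address, then drop that region from the SOS normal-world EPT. From the hunk above:

    /* Get the gpa address in SOS, then unmap it from the SOS EPT */
    gpa = hpa2gpa(vm0, hpa);
    ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp, gpa, size);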
@@ -177,10 +175,6 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
     foreach_vcpu(i, vm, vcpu) {
         vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
     }
-
-    foreach_vcpu(i, vm0, vcpu) {
-        vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
-    }
 }
 
 void destroy_secure_world(struct vm *vm)
@@ -127,8 +127,8 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
         hpa = gpa2hpa(vm, gpa_uos);
 
         /* Unmap from normal world */
-        unmap_mem(&map_params, (void *)hpa,
-                (void *)gpa_uos, adjust_size, 0U);
+        ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+            gpa_uos, adjust_size);
 
         /* Map to secure world */
         map_params.pml4_base = vm->arch_vm.sworld_eptp;
@@ -139,14 +139,12 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
                 IA32E_EPT_X_BIT |
                 IA32E_EPT_WB));
 
-        /* Unmap trusty memory space from sos ept mapping*/
-        map_params.pml4_base = vm0->arch_vm.nworld_eptp;
-        map_params.pml4_inverted = vm0->arch_vm.m2p;
         /* Get the gpa address in SOS */
         gpa_sos = hpa2gpa(vm0, hpa);
 
-        unmap_mem(&map_params, (void *)hpa,
-                (void *)gpa_sos, adjust_size, 0U);
+        /* Unmap trusty memory space from sos ept mapping*/
+        ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp,
+            gpa_sos, adjust_size);
         gpa_uos += adjust_size;
         size -= adjust_size;
         gpa_rebased += adjust_size;
@@ -155,10 +153,6 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
     foreach_vcpu(i, vm, vcpu) {
         vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
     }
-
-    foreach_vcpu(i, vm0, vcpu) {
-        vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
-    }
 }
 
 void destroy_secure_world(struct vm *vm)
@@ -324,8 +324,6 @@ void enable_smep(void);
 void init_paging(void);
 int map_mem(struct map_params *map_params, void *paddr, void *vaddr,
         uint64_t size, uint32_t flags);
-int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
-        uint64_t size, uint32_t flags);
 int mmu_modify_or_del(uint64_t *pml4_page,
         uint64_t vaddr_base, uint64_t size,
         uint64_t prot_set, uint64_t prot_clr,
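The unmap_mem() declaration disappears from the header, and this hunk does not add a declaration for ept_mr_del(), so its exact prototype is not visible in this diff. Judging purely from the call sites above, it presumably looks something like the following (an assumption, not part of this commit's hunks):

    /* assumed prototype, inferred from the ept_mr_del(vm, pml4_page, gpa, size) call sites */
    int ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size);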