mirror of https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-19 12:12:16 +00:00
hv: pgtable: rename 'struct memory_ops' to 'struct pgtable'
The fields and APIs in the old 'struct memory_ops' are used to add, modify, or delete page tables (pages or entries), so rename 'struct memory_ops' to 'struct pgtable'.

Tracked-On: #5830
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
ef98fa69ce
commit
768e483cd2
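To make the diff below easier to follow, here is a minimal sketch of the structure being renamed and of a typical call-site change. The member names and the call shown are taken from the hunks in this commit; the pointer signatures, the enum values, and the struct page_pool forward declaration are simplified stand-ins inferred from the call sites, not the verbatim ACRN definitions.

#include <stdbool.h>
#include <stdint.h>

struct page_pool;   /* stand-in: per-VM / per-hypervisor allocator for page-table pages */

enum _page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };  /* stand-in */

/* Formerly 'struct memory_ops': everything in it describes how page-table
 * pages and entries are added, modified, and deleted, hence the new name. */
struct pgtable {
    uint64_t default_access_right;        /* e.g. EPT_RWX, or PAGE_PRESENT | PAGE_RW | PAGE_USER */
    struct page_pool *pool;               /* where new page-table pages come from */
    bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
    uint64_t (*pgentry_present)(uint64_t pte);     /* non-zero if the entry is present */
    void (*clflush_pagewalk)(const void *entry);   /* flush an updated entry; signature inferred */
    void (*tweak_exe_right)(uint64_t *entry);      /* MCE-on-page-size-change mitigation hook */
    void (*recover_exe_right)(uint64_t *entry);    /* undo the tweak when splitting large pages */
};

/* A call site changes only the struct/member name, e.g. in local_gpa2hpa():
 *   before: lookup_address(eptp, gpa, &pg_size, &vm->arch_vm.ept_mem_ops);
 *   after:  lookup_address(eptp, gpa, &pg_size, &vm->arch_vm.ept_pgtable);
 */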
@@ -71,7 +71,7 @@ uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size)
 void *eptp;

 eptp = get_ept_entry(vm);
-pgentry = lookup_address((uint64_t *)eptp, gpa, &pg_size, &vm->arch_vm.ept_mem_ops);
+pgentry = lookup_address((uint64_t *)eptp, gpa, &pg_size, &vm->arch_vm.ept_pgtable);
 if (pgentry != NULL) {
 hpa = (((*pgentry & (~EPT_PFN_HIGH_MASK)) & (~(pg_size - 1UL)))
 | (gpa & (pg_size - 1UL)));
@@ -143,7 +143,7 @@ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,

 spinlock_obtain(&vm->ept_lock);

-mmu_add(pml4_page, hpa, gpa, size, prot, &vm->arch_vm.ept_mem_ops);
+mmu_add(pml4_page, hpa, gpa, size, prot, &vm->arch_vm.ept_pgtable);

 spinlock_release(&vm->ept_lock);

@@ -160,7 +160,7 @@ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,

 spinlock_obtain(&vm->ept_lock);

-mmu_modify_or_del(pml4_page, gpa, size, local_prot, prot_clr, &(vm->arch_vm.ept_mem_ops), MR_MODIFY);
+mmu_modify_or_del(pml4_page, gpa, size, local_prot, prot_clr, &(vm->arch_vm.ept_pgtable), MR_MODIFY);

 spinlock_release(&vm->ept_lock);

@@ -175,7 +175,7 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t

 spinlock_obtain(&vm->ept_lock);

-mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_mem_ops, MR_DEL);
+mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_pgtable, MR_DEL);

 spinlock_release(&vm->ept_lock);

@@ -247,18 +247,18 @@ void *get_ept_entry(struct acrn_vm *vm)
 */
 void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 {
-const struct memory_ops *mem_ops = &vm->arch_vm.ept_mem_ops;
+const struct pgtable *table = &vm->arch_vm.ept_pgtable;
 uint64_t *pml4e, *pdpte, *pde, *pte;
 uint64_t i, j, k, m;

 for (i = 0UL; i < PTRS_PER_PML4E; i++) {
 pml4e = pml4e_offset((uint64_t *)get_ept_entry(vm), i << PML4E_SHIFT);
-if (mem_ops->pgentry_present(*pml4e) == 0UL) {
+if (table->pgentry_present(*pml4e) == 0UL) {
 continue;
 }
 for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
 pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
-if (mem_ops->pgentry_present(*pdpte) == 0UL) {
+if (table->pgentry_present(*pdpte) == 0UL) {
 continue;
 }
 if (pdpte_large(*pdpte) != 0UL) {
@@ -267,7 +267,7 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 }
 for (k = 0UL; k < PTRS_PER_PDE; k++) {
 pde = pde_offset(pdpte, k << PDE_SHIFT);
-if (mem_ops->pgentry_present(*pde) == 0UL) {
+if (table->pgentry_present(*pde) == 0UL) {
 continue;
 }
 if (pde_large(*pde) != 0UL) {
@@ -276,7 +276,7 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 }
 for (m = 0UL; m < PTRS_PER_PTE; m++) {
 pte = pte_offset(pde, m << PTE_SHIFT);
-if (mem_ops->pgentry_present(*pte) != 0UL) {
+if (table->pgentry_present(*pte) != 0UL) {
 cb(pte, PTE_SIZE);
 }
 }
@@ -298,5 +298,5 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)

 struct page *alloc_ept_page(struct acrn_vm *vm)
 {
-return alloc_page(vm->arch_vm.ept_mem_ops.pool);
+return alloc_page(vm->arch_vm.ept_pgtable.pool);
 }
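A side note on walk_ept_table(), whose hunks appear above: it hands every present 4 KB leaf entry to the caller's callback as cb(pte, PTE_SIZE), and the elided large-page branches presumably pass the corresponding larger size. A hypothetical counting callback, with the pge_handler shape inferred from that call rather than taken from the hypervisor headers, could look like this:

#include <stdint.h>

static uint64_t mapped_bytes;   /* total bytes mapped by the walked EPT */

/* Hypothetical callback: matches the (entry pointer, mapping size) shape
 * of the cb(pte, PTE_SIZE) call in walk_ept_table() above. */
static void count_mapping(uint64_t *pgentry, uint64_t size)
{
    (void)pgentry;              /* the entry bits are not needed for counting */
    mapped_bytes += size;
}

/* Usage sketch: walk_ept_table(vm, count_mapping); then read mapped_bytes. */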
@@ -78,14 +78,14 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 */
 pml4_base = alloc_ept_page(vm);
 vm->arch_vm.sworld_eptp = pml4_base;
-sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_mem_ops);
+sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_pgtable);

 /* The trusty memory is remapped to guest physical address
 * of gpa_rebased to gpa_rebased + size
 */
 sub_table_addr = alloc_ept_page(vm);
 sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
-set_pgentry((uint64_t *)pml4_base, sworld_pml4e, &vm->arch_vm.ept_mem_ops);
+set_pgentry((uint64_t *)pml4_base, sworld_pml4e, &vm->arch_vm.ept_pgtable);

 nworld_pml4e = get_pgentry((uint64_t *)vm->arch_vm.nworld_eptp);

@@ -99,7 +99,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 pdpte = get_pgentry(src_pdpte_p);
 if ((pdpte & table_present) != 0UL) {
 pdpte &= ~EPT_EXE;
-set_pgentry(dest_pdpte_p, pdpte, &vm->arch_vm.ept_mem_ops);
+set_pgentry(dest_pdpte_p, pdpte, &vm->arch_vm.ept_pgtable);
 }
 src_pdpte_p++;
 dest_pdpte_p++;
@@ -130,7 +130,7 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)

 ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
 /* sanitize trusty ept page-structures */
-sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_mem_ops);
+sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_pgtable);
 vm->arch_vm.sworld_eptp = NULL;

 /* Restore memory to guest normal world */
@@ -492,9 +492,9 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 vm->vm_id = vm_id;
 vm->hw.created_vcpus = 0U;

-init_ept_mem_ops(&vm->arch_vm.ept_mem_ops, vm->vm_id);
+init_ept_pgtable(&vm->arch_vm.ept_pgtable, vm->vm_id);
 vm->arch_vm.nworld_eptp = alloc_ept_page(vm);
-sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp, &vm->arch_vm.ept_mem_ops);
+sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp, &vm->arch_vm.ept_pgtable);

 (void)memcpy_s(&vm->uuid[0], sizeof(vm->uuid),
 &vm_config->uuid[0], sizeof(vm_config->uuid));
@@ -142,16 +142,16 @@ static inline uint64_t get_sanitized_page(void)
 return hva2hpa(sanitized_page);
 }

-void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops)
+void sanitize_pte_entry(uint64_t *ptep, const struct pgtable *table)
 {
-set_pgentry(ptep, get_sanitized_page(), mem_ops);
+set_pgentry(ptep, get_sanitized_page(), table);
 }

-void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops)
+void sanitize_pte(uint64_t *pt_page, const struct pgtable *table)
 {
 uint64_t i;
 for (i = 0UL; i < PTRS_PER_PTE; i++) {
-sanitize_pte_entry(pt_page + i, mem_ops);
+sanitize_pte_entry(pt_page + i, table);
 }
 }

@@ -207,7 +207,7 @@ void ppt_clear_user_bit(uint64_t base, uint64_t size)
 size_aligned = region_end - base_aligned;

 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, base_aligned,
-round_pde_up(size_aligned), 0UL, PAGE_USER, &ppt_mem_ops, MR_MODIFY);
+round_pde_up(size_aligned), 0UL, PAGE_USER, &ppt_pgtable, MR_MODIFY);
 }

 void ppt_set_nx_bit(uint64_t base, uint64_t size, bool add)
@@ -218,10 +218,10 @@ void ppt_set_nx_bit(uint64_t base, uint64_t size, bool add)

 if (add) {
 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr,
-base_aligned, size_aligned, PAGE_NX, 0UL, &ppt_mem_ops, MR_MODIFY);
+base_aligned, size_aligned, PAGE_NX, 0UL, &ppt_pgtable, MR_MODIFY);
 } else {
 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr,
-base_aligned, size_aligned, 0UL, PAGE_NX, &ppt_mem_ops, MR_MODIFY);
+base_aligned, size_aligned, 0UL, PAGE_NX, &ppt_pgtable, MR_MODIFY);
 }
 }

@@ -250,10 +250,10 @@ void init_paging(void)
 }

 /* Allocate memory for Hypervisor PML4 table */
-ppt_mmu_pml4_addr = alloc_page(ppt_mem_ops.pool);
+ppt_mmu_pml4_addr = alloc_page(ppt_pgtable.pool);

 /* Map all memory regions to UC attribute */
-mmu_add((uint64_t *)ppt_mmu_pml4_addr, 0UL, 0UL, high64_max_ram - 0UL, attr_uc, &ppt_mem_ops);
+mmu_add((uint64_t *)ppt_mmu_pml4_addr, 0UL, 0UL, high64_max_ram - 0UL, attr_uc, &ppt_pgtable);

 /* Modify WB attribute for E820_TYPE_RAM */
 for (i = 0U; i < entries_count; i++) {
@@ -269,10 +269,10 @@ void init_paging(void)
 }

 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, 0UL, round_pde_up(low32_max_ram),
-PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
+PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_pgtable, MR_MODIFY);

 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
-PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
+PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_pgtable, MR_MODIFY);

 /*
 * set the paging-structure entries' U/S flag to supervisor-mode for hypervisor owned memroy.
@@ -284,7 +284,7 @@ void init_paging(void)
 hv_hva = get_hv_image_base();
 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hva & PDE_MASK,
 CONFIG_HV_RAM_SIZE + (((hv_hva & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL),
-PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER, &ppt_mem_ops, MR_MODIFY);
+PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER, &ppt_pgtable, MR_MODIFY);

 /*
 * remove 'NX' bit for pages that contain hv code section, as by default XD bit is set for
@@ -292,10 +292,10 @@ void init_paging(void)
 */
 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, round_pde_down(hv_hva),
 round_pde_up((uint64_t)&ld_text_end) - round_pde_down(hv_hva), 0UL,
-PAGE_NX, &ppt_mem_ops, MR_MODIFY);
+PAGE_NX, &ppt_pgtable, MR_MODIFY);
 #if (SOS_VM_NUM == 1)
 mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (uint64_t)get_reserve_sworld_memory_base(),
-TRUSTY_RAM_SIZE * MAX_POST_VM_NUM, PAGE_USER, 0UL, &ppt_mem_ops, MR_MODIFY);
+TRUSTY_RAM_SIZE * MAX_POST_VM_NUM, PAGE_USER, 0UL, &ppt_pgtable, MR_MODIFY);
 #endif

 /*
@@ -304,14 +304,14 @@ void init_paging(void)

 if ((HI_MMIO_START != ~0UL) && (HI_MMIO_END != 0UL)) {
 mmu_add((uint64_t *)ppt_mmu_pml4_addr, HI_MMIO_START, HI_MMIO_START,
-(HI_MMIO_END - HI_MMIO_START), attr_uc, &ppt_mem_ops);
+(HI_MMIO_END - HI_MMIO_START), attr_uc, &ppt_pgtable);
 }

 /* Enable paging */
 enable_paging();

 /* set ptep in sanitized_page point to itself */
-sanitize_pte((uint64_t *)sanitized_page, &ppt_mem_ops);
+sanitize_pte((uint64_t *)sanitized_page, &ppt_pgtable);
 }

 /*
@@ -115,7 +115,7 @@ static inline uint64_t ppt_pgentry_present(uint64_t pte)
 static inline void nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
 static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}

-const struct memory_ops ppt_mem_ops = {
+const struct pgtable ppt_pgtable = {
 .default_access_right = (PAGE_PRESENT | PAGE_RW | PAGE_USER),
 .pool = &ppt_page_pool,
 .large_page_support = large_page_support,
@@ -234,7 +234,7 @@ static inline void ept_recover_exe_right(uint64_t *entry)
 *entry |= EPT_EXE;
 }

-void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
+void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
 {
 struct acrn_vm *vm = get_vm_from_vmid(vm_id);

@@ -254,22 +254,22 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 vm->arch_vm.sworld_memory_base_hva = post_uos_sworld_memory[page_idx];
 }

-mem_ops->pool = &ept_page_pool[vm_id];
-mem_ops->default_access_right = EPT_RWX;
-mem_ops->pgentry_present = ept_pgentry_present;
-mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
-mem_ops->large_page_support = large_page_support;
+table->pool = &ept_page_pool[vm_id];
+table->default_access_right = EPT_RWX;
+table->pgentry_present = ept_pgentry_present;
+table->clflush_pagewalk = ept_clflush_pagewalk;
+table->large_page_support = large_page_support;

 /* Mitigation for issue "Machine Check Error on Page Size Change" */
 if (is_ept_force_4k_ipage()) {
-mem_ops->tweak_exe_right = ept_tweak_exe_right;
-mem_ops->recover_exe_right = ept_recover_exe_right;
+table->tweak_exe_right = ept_tweak_exe_right;
+table->recover_exe_right = ept_recover_exe_right;
 /* For RTVM, build 4KB page mapping in EPT for code pages */
 if (is_rt_vm(vm)) {
-mem_ops->large_page_support = use_large_page;
+table->large_page_support = use_large_page;
 }
 } else {
-mem_ops->tweak_exe_right = nop_tweak_exe_right;
-mem_ops->recover_exe_right = nop_recover_exe_right;
+table->tweak_exe_right = nop_tweak_exe_right;
+table->recover_exe_right = nop_recover_exe_right;
 }
 }
@@ -14,7 +14,7 @@
 #define DBG_LEVEL_MMU 6U


-static void try_to_free_pgtable_page(const struct memory_ops *mem_ops,
+static void try_to_free_pgtable_page(const struct pgtable *table,
 uint64_t *pde, uint64_t *pt_page, uint32_t type)
 {
 if (type == MR_DEL) {
@@ -22,14 +22,14 @@ static void try_to_free_pgtable_page(const struct memory_ops *mem_ops,

 for (index = 0UL; index < PTRS_PER_PTE; index++) {
 uint64_t *pte = pt_page + index;
-if ((mem_ops->pgentry_present(*pte) != 0UL)) {
+if ((table->pgentry_present(*pte) != 0UL)) {
 break;
 }
 }

 if (index == PTRS_PER_PTE) {
-free_page(mem_ops->pool, (void *)pt_page);
-sanitize_pte_entry(pde, mem_ops);
+free_page(table->pool, (void *)pt_page);
+sanitize_pte_entry(pde, table);
 }
 }
 }
@@ -40,7 +40,7 @@ static void try_to_free_pgtable_page(const struct memory_ops *mem_ops,
 * @pre: level could only IA32E_PDPT or IA32E_PD
 */
 static void split_large_page(uint64_t *pte, enum _page_table_level level,
-__unused uint64_t vaddr, const struct memory_ops *mem_ops)
+__unused uint64_t vaddr, const struct pgtable *table)
 {
 uint64_t *pbase;
 uint64_t ref_paddr, paddr, paddrinc;
@@ -57,46 +57,46 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
 paddrinc = PTE_SIZE;
 ref_prot = (*pte) & ~PDE_PFN_MASK;
 ref_prot &= ~PAGE_PSE;
-mem_ops->recover_exe_right(&ref_prot);
+table->recover_exe_right(&ref_prot);
 break;
 }

-pbase = (uint64_t *)alloc_page(mem_ops->pool);
+pbase = (uint64_t *)alloc_page(table->pool);
 dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, pbase: 0x%lx\n", __func__, ref_paddr, pbase);

 paddr = ref_paddr;
 for (i = 0UL; i < PTRS_PER_PTE; i++) {
-set_pgentry(pbase + i, paddr | ref_prot, mem_ops);
+set_pgentry(pbase + i, paddr | ref_prot, table);
 paddr += paddrinc;
 }

-ref_prot = mem_ops->default_access_right;
-set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot, mem_ops);
+ref_prot = table->default_access_right;
+set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot, table);

 /* TODO: flush the TLB */
 }

 static inline void local_modify_or_del_pte(uint64_t *pte,
-uint64_t prot_set, uint64_t prot_clr, uint32_t type, const struct memory_ops *mem_ops)
+uint64_t prot_set, uint64_t prot_clr, uint32_t type, const struct pgtable *table)
 {
 if (type == MR_MODIFY) {
 uint64_t new_pte = *pte;
 new_pte &= ~prot_clr;
 new_pte |= prot_set;
-set_pgentry(pte, new_pte, mem_ops);
+set_pgentry(pte, new_pte, table);
 } else {
-sanitize_pte_entry(pte, mem_ops);
+sanitize_pte_entry(pte, table);
 }
 }

 /*
 * pgentry may means pml4e/pdpte/pde
 */
-static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot, const struct memory_ops *mem_ops)
+static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot, const struct pgtable *table)
 {
-sanitize_pte((uint64_t *)pd_page, mem_ops);
+sanitize_pte((uint64_t *)pd_page, table);

-set_pgentry(pde, hva2hpa(pd_page) | prot, mem_ops);
+set_pgentry(pde, hva2hpa(pd_page) | prot, table);
 }

 /*
@@ -107,7 +107,7 @@ static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot
 * delete [vaddr_start, vaddr_end) MT PT mapping
 */
 static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type)
+uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
 {
 uint64_t *pt_page = pde_page_vaddr(*pde);
 uint64_t vaddr = vaddr_start;
@@ -117,7 +117,7 @@ static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vadd
 for (; index < PTRS_PER_PTE; index++) {
 uint64_t *pte = pt_page + index;

-if ((mem_ops->pgentry_present(*pte) == 0UL)) {
+if ((table->pgentry_present(*pte) == 0UL)) {
 /*suppress warning message for low memory (< 1MBytes),as service VM
 * will update MTTR attributes for this region by default whether it
 * is present or not.
@@ -126,7 +126,7 @@ static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vadd
 pr_warn("%s, vaddr: 0x%lx pte is not present.\n", __func__, vaddr);
 }
 } else {
-local_modify_or_del_pte(pte, prot_set, prot_clr, type, mem_ops);
+local_modify_or_del_pte(pte, prot_set, prot_clr, type, table);
 }

 vaddr += PTE_SIZE;
@@ -135,7 +135,7 @@ static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vadd
 }
 }

-try_to_free_pgtable_page(mem_ops, pde, pt_page, type);
+try_to_free_pgtable_page(table, pde, pt_page, type);
 }

 /*
@@ -146,7 +146,7 @@ static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vadd
 * delete [vaddr_start, vaddr_end) MT PT mapping
 */
 static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type)
+uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
 {
 uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 uint64_t vaddr = vaddr_start;
@@ -157,16 +157,16 @@ static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t va
 uint64_t *pde = pd_page + index;
 uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;

-if (mem_ops->pgentry_present(*pde) == 0UL) {
+if (table->pgentry_present(*pde) == 0UL) {
 if (type == MR_MODIFY) {
 pr_warn("%s, addr: 0x%lx pde is not present.\n", __func__, vaddr);
 }
 } else {
 if (pde_large(*pde) != 0UL) {
 if ((vaddr_next > vaddr_end) || (!mem_aligned_check(vaddr, PDE_SIZE))) {
-split_large_page(pde, IA32E_PD, vaddr, mem_ops);
+split_large_page(pde, IA32E_PD, vaddr, table);
 } else {
-local_modify_or_del_pte(pde, prot_set, prot_clr, type, mem_ops);
+local_modify_or_del_pte(pde, prot_set, prot_clr, type, table);
 if (vaddr_next < vaddr_end) {
 vaddr = vaddr_next;
 continue;
@@ -174,7 +174,7 @@ static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t va
 break; /* done */
 }
 }
-modify_or_del_pte(pde, vaddr, vaddr_end, prot_set, prot_clr, mem_ops, type);
+modify_or_del_pte(pde, vaddr, vaddr_end, prot_set, prot_clr, table, type);
 }
 if (vaddr_next >= vaddr_end) {
 break; /* done */
@@ -182,7 +182,7 @@ static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t va
 vaddr = vaddr_next;
 }

-try_to_free_pgtable_page(mem_ops, pdpte, pd_page, type);
+try_to_free_pgtable_page(table, pdpte, pd_page, type);
 }

 /*
@@ -193,7 +193,7 @@ static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t va
 * delete [vaddr_start, vaddr_end) MT PT mapping
 */
 static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type)
+uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
 {
 uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 uint64_t vaddr = vaddr_start;
@@ -204,7 +204,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
 uint64_t *pdpte = pdpt_page + index;
 uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;

-if (mem_ops->pgentry_present(*pdpte) == 0UL) {
+if (table->pgentry_present(*pdpte) == 0UL) {
 if (type == MR_MODIFY) {
 pr_warn("%s, vaddr: 0x%lx pdpte is not present.\n", __func__, vaddr);
 }
@@ -212,9 +212,9 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
 if (pdpte_large(*pdpte) != 0UL) {
 if ((vaddr_next > vaddr_end) ||
 (!mem_aligned_check(vaddr, PDPTE_SIZE))) {
-split_large_page(pdpte, IA32E_PDPT, vaddr, mem_ops);
+split_large_page(pdpte, IA32E_PDPT, vaddr, table);
 } else {
-local_modify_or_del_pte(pdpte, prot_set, prot_clr, type, mem_ops);
+local_modify_or_del_pte(pdpte, prot_set, prot_clr, type, table);
 if (vaddr_next < vaddr_end) {
 vaddr = vaddr_next;
 continue;
@@ -222,7 +222,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
 break; /* done */
 }
 }
-modify_or_del_pde(pdpte, vaddr, vaddr_end, prot_set, prot_clr, mem_ops, type);
+modify_or_del_pde(pdpte, vaddr, vaddr_end, prot_set, prot_clr, table, type);
 }
 if (vaddr_next >= vaddr_end) {
 break; /* done */
@@ -245,7 +245,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
 * delete [vaddr_base, vaddr_base + size ) memory region page table mapping.
 */
 void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
-uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type)
+uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
 {
 uint64_t vaddr = round_page_up(vaddr_base);
 uint64_t vaddr_next, vaddr_end;
@@ -258,10 +258,10 @@ void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
 while (vaddr < vaddr_end) {
 vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
 pml4e = pml4e_offset(pml4_page, vaddr);
-if ((mem_ops->pgentry_present(*pml4e) == 0UL) && (type == MR_MODIFY)) {
+if ((table->pgentry_present(*pml4e) == 0UL) && (type == MR_MODIFY)) {
 ASSERT(false, "invalid op, pml4e not present");
 } else {
-modify_or_del_pdpte(pml4e, vaddr, vaddr_end, prot_set, prot_clr, mem_ops, type);
+modify_or_del_pdpte(pml4e, vaddr, vaddr_end, prot_set, prot_clr, table, type);
 vaddr = vaddr_next;
 }
 }
@@ -272,7 +272,7 @@ void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
 * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
 */
 static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot, const struct memory_ops *mem_ops)
+uint64_t prot, const struct pgtable *table)
 {
 uint64_t *pt_page = pde_page_vaddr(*pde);
 uint64_t vaddr = vaddr_start;
@@ -284,10 +284,10 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
 for (; index < PTRS_PER_PTE; index++) {
 uint64_t *pte = pt_page + index;

-if (mem_ops->pgentry_present(*pte) != 0UL) {
+if (table->pgentry_present(*pte) != 0UL) {
 pr_fatal("%s, pte 0x%lx is already present!\n", __func__, vaddr);
 } else {
-set_pgentry(pte, paddr | prot, mem_ops);
+set_pgentry(pte, paddr | prot, table);
 }
 paddr += PTE_SIZE;
 vaddr += PTE_SIZE;
@@ -303,7 +303,7 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
 * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
 */
 static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot, const struct memory_ops *mem_ops)
+uint64_t prot, const struct pgtable *table)
 {
 uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 uint64_t vaddr = vaddr_start;
@@ -320,13 +320,13 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 if (pde_large(*pde) != 0UL) {
 pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
 } else {
-if (mem_ops->pgentry_present(*pde) == 0UL) {
-if (mem_ops->large_page_support(IA32E_PD, prot) &&
+if (table->pgentry_present(*pde) == 0UL) {
+if (table->large_page_support(IA32E_PD, prot) &&
 mem_aligned_check(paddr, PDE_SIZE) &&
 mem_aligned_check(vaddr, PDE_SIZE) &&
 (vaddr_next <= vaddr_end)) {
-mem_ops->tweak_exe_right(&local_prot);
-set_pgentry(pde, paddr | (local_prot | PAGE_PSE), mem_ops);
+table->tweak_exe_right(&local_prot);
+set_pgentry(pde, paddr | (local_prot | PAGE_PSE), table);
 if (vaddr_next < vaddr_end) {
 paddr += (vaddr_next - vaddr);
 vaddr = vaddr_next;
@@ -334,11 +334,11 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 }
 break; /* done */
 } else {
-void *pt_page = alloc_page(mem_ops->pool);
-construct_pgentry(pde, pt_page, mem_ops->default_access_right, mem_ops);
+void *pt_page = alloc_page(table->pool);
+construct_pgentry(pde, pt_page, table->default_access_right, table);
 }
 }
-add_pte(pde, paddr, vaddr, vaddr_end, prot, mem_ops);
+add_pte(pde, paddr, vaddr, vaddr_end, prot, table);
 }
 if (vaddr_next >= vaddr_end) {
 break; /* done */
@@ -353,7 +353,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
 */
 static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
-uint64_t prot, const struct memory_ops *mem_ops)
+uint64_t prot, const struct pgtable *table)
 {
 uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 uint64_t vaddr = vaddr_start;
@@ -369,13 +369,13 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 if (pdpte_large(*pdpte) != 0UL) {
 pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
 } else {
-if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-if (mem_ops->large_page_support(IA32E_PDPT, prot) &&
+if (table->pgentry_present(*pdpte) == 0UL) {
+if (table->large_page_support(IA32E_PDPT, prot) &&
 mem_aligned_check(paddr, PDPTE_SIZE) &&
 mem_aligned_check(vaddr, PDPTE_SIZE) &&
 (vaddr_next <= vaddr_end)) {
-mem_ops->tweak_exe_right(&local_prot);
-set_pgentry(pdpte, paddr | (local_prot | PAGE_PSE), mem_ops);
+table->tweak_exe_right(&local_prot);
+set_pgentry(pdpte, paddr | (local_prot | PAGE_PSE), table);
 if (vaddr_next < vaddr_end) {
 paddr += (vaddr_next - vaddr);
 vaddr = vaddr_next;
@@ -383,11 +383,11 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 }
 break; /* done */
 } else {
-void *pd_page = alloc_page(mem_ops->pool);
-construct_pgentry(pdpte, pd_page, mem_ops->default_access_right, mem_ops);
+void *pd_page = alloc_page(table->pool);
+construct_pgentry(pdpte, pd_page, table->default_access_right, table);
 }
 }
-add_pde(pdpte, paddr, vaddr, vaddr_end, prot, mem_ops);
+add_pde(pdpte, paddr, vaddr, vaddr_end, prot, table);
 }
 if (vaddr_next >= vaddr_end) {
 break; /* done */
@@ -403,7 +403,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 * @pre: the prot should set before call this function.
 */
 void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint64_t size, uint64_t prot,
-const struct memory_ops *mem_ops)
+const struct pgtable *table)
 {
 uint64_t vaddr, vaddr_next, vaddr_end;
 uint64_t paddr;
@@ -419,11 +419,11 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
 while (vaddr < vaddr_end) {
 vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
 pml4e = pml4e_offset(pml4_page, vaddr);
-if (mem_ops->pgentry_present(*pml4e) == 0UL) {
-void *pdpt_page = alloc_page(mem_ops->pool);
-construct_pgentry(pml4e, pdpt_page, mem_ops->default_access_right, mem_ops);
+if (table->pgentry_present(*pml4e) == 0UL) {
+void *pdpt_page = alloc_page(table->pool);
+construct_pgentry(pml4e, pdpt_page, table->default_access_right, table);
 }
-add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, mem_ops);
+add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, table);

 paddr += (vaddr_next - vaddr);
 vaddr = vaddr_next;
@@ -433,32 +433,32 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
 /**
 * @pre (pml4_page != NULL) && (pg_size != NULL)
 */
-const uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr, uint64_t *pg_size, const struct memory_ops *mem_ops)
+const uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr, uint64_t *pg_size, const struct pgtable *table)
 {
 const uint64_t *pret = NULL;
 bool present = true;
 uint64_t *pml4e, *pdpte, *pde, *pte;

 pml4e = pml4e_offset(pml4_page, addr);
-present = (mem_ops->pgentry_present(*pml4e) != 0UL);
+present = (table->pgentry_present(*pml4e) != 0UL);

 if (present) {
 pdpte = pdpte_offset(pml4e, addr);
-present = (mem_ops->pgentry_present(*pdpte) != 0UL);
+present = (table->pgentry_present(*pdpte) != 0UL);
 if (present) {
 if (pdpte_large(*pdpte) != 0UL) {
 *pg_size = PDPTE_SIZE;
 pret = pdpte;
 } else {
 pde = pde_offset(pdpte, addr);
-present = (mem_ops->pgentry_present(*pde) != 0UL);
+present = (table->pgentry_present(*pde) != 0UL);
 if (present) {
 if (pde_large(*pde) != 0UL) {
 *pg_size = PDE_SIZE;
 pret = pde;
 } else {
 pte = pte_offset(pde, addr);
-present = (mem_ops->pgentry_present(*pte) != 0UL);
+present = (table->pgentry_present(*pte) != 0UL);
 if (present) {
 *pg_size = PTE_SIZE;
 pret = pte;
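The lookup_address() hunk above also shows why the function reports the mapping size through pg_size: the caller splices the frame bits from the returned entry with the page offset from the original address, exactly as local_gpa2hpa() does in the first hunk. A stand-alone arithmetic sketch with made-up values (not hypervisor code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend lookup_address() found a 2 MiB mapping (pg_size = 2 MiB)
     * whose entry holds the host frame 0x80000000 plus attribute bits. */
    uint64_t pg_size = 2UL * 1024UL * 1024UL;
    uint64_t entry   = 0x80000000UL | 0x7UL;     /* frame | RWX-style bits */
    uint64_t gpa     = 0x12345678UL;             /* guest-physical address */

    /* Same composition as local_gpa2hpa(): frame bits from the entry,
     * low bits (the offset within the page) from the guest address.
     * The EPT_PFN_HIGH_MASK clearing done by the real code is omitted. */
    uint64_t hpa = (entry & ~(pg_size - 1UL)) | (gpa & (pg_size - 1UL));

    printf("hpa = 0x%" PRIx64 "\n", hpa);        /* prints 0x80145678 */
    return 0;
}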
@@ -108,7 +108,7 @@ struct vm_arch {
 */
 void *sworld_eptp;
 void *sworld_memory_base_hva;
-struct memory_ops ept_mem_ops;
+struct pgtable ept_pgtable;

 struct acrn_vioapics vioapics; /* Virtual IOAPIC/s */
 struct acrn_vpic vpic; /* Virtual PIC */
@@ -81,8 +81,8 @@ static inline uint64_t round_pde_down(uint64_t val)
 #define PAGE_SIZE_2M MEM_2M
 #define PAGE_SIZE_1G MEM_1G

-void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
-void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
+void sanitize_pte_entry(uint64_t *ptep, const struct pgtable *table);
+void sanitize_pte(uint64_t *pt_page, const struct pgtable *table);
 /**
 * @brief MMU paging enable
 *
@@ -110,9 +110,9 @@ void enable_smap(void);
 */
 void init_paging(void);
 void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,
-uint64_t size, uint64_t prot, const struct memory_ops *mem_ops);
+uint64_t size, uint64_t prot, const struct pgtable *table);
 void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
-uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type);
+uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type);
 void ppt_clear_user_bit(uint64_t base, uint64_t size);
 void ppt_set_nx_bit(uint64_t base, uint64_t size, bool add);

@@ -66,7 +66,7 @@ struct page_pool {
 struct page *dummy_page;
 };

-struct memory_ops {
+struct pgtable {
 uint64_t default_access_right;
 struct page_pool *pool;
 bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
@@ -76,8 +76,8 @@ struct memory_ops {
 void (*recover_exe_right)(uint64_t *entry);
 };

-extern const struct memory_ops ppt_mem_ops;
-void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id);
+extern const struct pgtable ppt_pgtable;
+void init_ept_pgtable(struct pgtable *table, uint16_t vm_id);
 struct page *alloc_page(struct page_pool *pool);
 void free_page(struct page_pool *pool, struct page *page);
 void *get_reserve_sworld_memory_base(void);
@@ -255,10 +255,10 @@ static inline uint64_t get_pgentry(const uint64_t *pte)
 /*
 * pgentry may means pml4e/pdpte/pde/pte
 */
-static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct memory_ops *mem_ops)
+static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct pgtable *table)
 {
 *ptep = pte;
-mem_ops->clflush_pagewalk(ptep);
+table->clflush_pagewalk(ptep);
 }

 static inline uint64_t pde_large(uint64_t pde)
@@ -275,7 +275,7 @@ static inline uint64_t pdpte_large(uint64_t pdpte)
 *@pre (pml4_page != NULL) && (pg_size != NULL)
 */
 const uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr,
-uint64_t *pg_size, const struct memory_ops *mem_ops);
+uint64_t *pg_size, const struct pgtable *table);

 /**
 * @}