Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-09-06 19:30:46 +00:00)
hv: mmu: remove alloc_page() API
No one calls this API for now, so remove it.

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
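With alloc_page() gone, hypervisor page-table (PPT) pages come from statically reserved, page-aligned storage handed out through the ppt_mem_ops callbacks visible in the diff below (for example ppt_mem_ops.get_pml4_page). The following is only a minimal sketch of that allocation style; the pool and helper names (ppt_pml4_page, ppt_get_pml4_page) are assumptions for illustration, not the actual ACRN definitions.

/* Illustrative sketch only; the helper and pool names are assumptions,
 * not the actual ACRN definitions. */
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE_4K	4096U

/* A statically reserved, page-aligned buffer backs the host PML4, so the
 * old heap path (alloc_page()/free()) is no longer involved. */
static uint8_t ppt_pml4_page[PAGE_SIZE_4K] __attribute__((aligned(PAGE_SIZE_4K)));

static void *ppt_get_pml4_page(const void *info, uint64_t gpa)
{
	(void)info;	/* unused in this sketch */
	(void)gpa;	/* unused in this sketch */
	(void)memset(ppt_pml4_page, 0U, PAGE_SIZE_4K);
	return (void *)ppt_pml4_page;
}

Because the backing storage is fixed at build time, there is nothing to hand back to a heap, which is also why the free path disappears further down in this diff.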
@@ -164,10 +164,6 @@ config MALLOC_ALIGN
 	range 8 32
 	default 16
 
-config NUM_ALLOC_PAGES
-	hex "Capacity in pages of the heap for page_alloc()"
-	default 0x1000
-
 config HEAP_SIZE
 	hex "Capacity of the heap for malloc()"
 	default 0x100000
@@ -222,7 +218,7 @@ config HV_RAM_START
 
 config HV_RAM_SIZE
 	hex "Size of the RAM region used by the hypervisor"
-	default 0x06000000
+	default 0x04800000
 	help
 	  A 64-bit integer indicating the size of RAM used by the hypervisor.
 	  It is ensured at link time that the footprint of the hypervisor
@@ -533,7 +533,7 @@ void cpu_secondary_init(void)
 	/* Switch this CPU to use the same page tables set-up by the
 	 * primary/boot CPU
 	 */
-	enable_paging(get_paging_pml4());
+	enable_paging();
 
 	enable_smep();
 
@@ -30,7 +30,7 @@
 #include <hypervisor.h>
 #include <reloc.h>
 
-static void *mmu_pml4_addr;
+static void *ppt_mmu_pml4_addr;
 static void *sanitized_page[CPU_PAGE_SIZE];
 
 static struct vmx_capability {
@@ -206,13 +206,7 @@ void sanitize_pte(uint64_t *pt_page)
 	}
 }
 
-uint64_t get_paging_pml4(void)
-{
-	/* Return address to caller */
-	return hva2hpa(mmu_pml4_addr);
-}
-
-void enable_paging(uint64_t pml4_base_addr)
+void enable_paging(void)
 {
 	uint64_t tmp64 = 0UL;
 
@@ -220,7 +214,7 @@ void enable_paging(uint64_t pml4_base_addr)
 	CPU_CR_READ(cr0, &tmp64);
 	CPU_CR_WRITE(cr0, tmp64 | CR0_WP);
 
-	CPU_CR_WRITE(cr3, pml4_base_addr);
+	CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
 }
 
 void enable_smep(void)
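Taken together, the two hunks above leave enable_paging() with no parameter: CR3 is always loaded from the module-local ppt_mmu_pml4_addr. Reassembled from the lines shown here (the code between the local declaration and the CR0 access is not part of this diff and is elided):

void enable_paging(void)
{
	uint64_t tmp64 = 0UL;

	/* ... lines not shown in this diff ... */

	CPU_CR_READ(cr0, &tmp64);
	CPU_CR_WRITE(cr0, tmp64 | CR0_WP);

	/* CR3 now always points at the hypervisor's own PML4. */
	CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
}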
@@ -240,12 +234,12 @@ void init_paging(void)
 	uint32_t i;
 	uint64_t low32_max_ram = 0UL;
 	uint64_t high64_max_ram;
-	uint64_t attr_uc = (PAGE_TABLE | PAGE_CACHE_UC);
+	uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC);
 
 	pr_dbg("HV MMU Initialization");
 
 	/* Allocate memory for Hypervisor PML4 table */
-	mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
+	ppt_mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
 
 	init_e820();
 	obtain_e820_mem_info();
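The attr_uc change only spells out the attribute bits. Assuming PAGE_TABLE was a shorthand for (PAGE_PRESENT | PAGE_RW | PAGE_USER) (its definition is not part of this diff), the value of attr_uc is unchanged:

/* Assumed definitions, for illustration only; not shown in this diff.
 * The bit positions are the architectural x86 page-table flag bits. */
#define PAGE_PRESENT	(1UL << 0U)
#define PAGE_RW		(1UL << 1U)
#define PAGE_USER	(1UL << 2U)
#define PAGE_TABLE	(PAGE_PRESENT | PAGE_RW | PAGE_USER)

/* Then:
 *   (PAGE_TABLE | PAGE_CACHE_UC)
 * == (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC)
 */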
@@ -259,7 +253,7 @@ void init_paging(void)
 	}
 
 	/* Map all memory regions to UC attribute */
-	mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
+	mmu_add((uint64_t *)ppt_mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
 		high64_max_ram - e820_mem.mem_bottom, attr_uc, &ppt_mem_ops);
 
 	/* Modify WB attribute for E820_TYPE_RAM */
@@ -275,49 +269,28 @@ void init_paging(void)
 		}
 	}
 
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
 			PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
 
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
 			PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
 
 	/* set the paging-structure entries' U/S flag
 	 * to supervisor-mode for hypervisor owned memroy.
 	 */
 	hv_hpa = get_hv_image_base();
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa & PDE_MASK,
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
 		CONFIG_HV_RAM_SIZE + ((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL,
 		PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
 		&ppt_mem_ops, MR_MODIFY);
 
 	/* Enable paging */
-	enable_paging(hva2hpa(mmu_pml4_addr));
+	enable_paging();
 
 	/* set ptep in sanitized_page point to itself */
 	sanitize_pte((uint64_t *)sanitized_page);
 }
 
-void *alloc_paging_struct(void)
-{
-	void *ptr = NULL;
-
-	/* Allocate a page from Hypervisor heap */
-	ptr = alloc_page();
-
-	ASSERT(ptr != NULL, "page alloc failed!");
-	(void)memset(ptr, 0U, CPU_PAGE_SIZE);
-
-	return ptr;
-}
-
-void free_paging_struct(void *ptr)
-{
-	if (ptr != NULL) {
-		(void)memset(ptr, 0U, CPU_PAGE_SIZE);
-		free(ptr);
-	}
-}
-
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
 {
 	uint64_t curr_hpa;
 
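With alloc_paging_struct()/free_paging_struct() removed above, intermediate paging structures also have to come from pre-sized static pools rather than the heap. A minimal sketch of that pattern follows; the pool size, array, and helper names (PPT_PD_PAGE_NUM, ppt_pd_pages, ppt_get_pd_page) are assumptions for illustration, and the real ACRN pool layout is not part of this diff.

/* Illustrative sketch only; pool sizes and names are assumptions. */
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE_4K	4096U
#define PPT_PD_PAGE_NUM	64U	/* assumed pool capacity */

static uint8_t ppt_pd_pages[PPT_PD_PAGE_NUM][PAGE_SIZE_4K]
	__attribute__((aligned(PAGE_SIZE_4K)));
static uint32_t ppt_pd_page_used;

/* Hand out the next free page from the fixed pool; pages are never
 * returned to a heap, which is why free_paging_struct() could be dropped. */
static void *ppt_get_pd_page(const void *info, uint64_t gpa)
{
	void *page = NULL;

	(void)info;	/* unused in this sketch */
	(void)gpa;	/* unused in this sketch */
	if (ppt_pd_page_used < PPT_PD_PAGE_NUM) {
		page = ppt_pd_pages[ppt_pd_page_used];
		ppt_pd_page_used++;
		(void)memset(page, 0U, PAGE_SIZE_4K);
	}
	return page;
}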
@@ -134,10 +134,10 @@ struct iommu_domain {
 };
 
 struct context_table {
-	struct cpu_page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
+	struct page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
 };
 
-static struct cpu_page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
+static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
 static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
 
 static inline uint8_t*
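The VT-d root and context tables switch from struct cpu_page to the generic struct page type. Its definition is not part of this diff; assuming it is simply a page-sized, page-aligned byte container, it would look roughly like this:

/* Assumed shape of struct page, for illustration only; not shown in this diff. */
#include <stdint.h>

#define CPU_PAGE_SIZE	4096U

struct page {
	uint8_t contents[CPU_PAGE_SIZE];
} __attribute__((aligned(CPU_PAGE_SIZE)));

Either way, root_tables[] and ctx_tables[] remain statically reserved, page-aligned arrays, consistent with dropping the dynamic page allocator.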