hv: mmu: remove alloc_page() API

Nothing calls this API anymore, so remove it.

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Authored by Li, Fei1 on 2018-10-31 07:41:59 +08:00; committed by lijinxia
parent 0391f84c83
commit f1ed6c503c
8 changed files with 15 additions and 115 deletions
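
Context for reviewers: with alloc_page()/alloc_pages() gone, page-table pages are no longer carved out of a runtime heap; they come from statically reserved pools reached through the ppt_mem_ops callbacks (see the init_paging() hunk below). A minimal sketch of that pattern, assuming a page-sized container type and a trivial one-entry pool; the names here (PAGE_SIZE_4K, ppt_pml4_pages, sketch_get_pml4_page) are illustrative, not ACRN's:

    #include <stdint.h>

    #define PAGE_SIZE_4K 4096U    /* stand-in for CPU_PAGE_SIZE */

    /* A page is just page-sized, page-aligned storage. */
    struct page {
            uint8_t contents[PAGE_SIZE_4K];
    } __attribute__((aligned(PAGE_SIZE_4K)));

    /* Storage reserved at build time: "allocation" cannot fail at
     * runtime and needs no heap bitmap bookkeeping. */
    static struct page ppt_pml4_pages[1];

    static inline struct page *sketch_get_pml4_page(const void *info, uint64_t gpa)
    {
            (void)info;    /* unused in this sketch */
            (void)gpa;
            return &ppt_pml4_pages[0];
    }

Trading the heap for fixed pools makes the hypervisor's memory footprint computable at link time, which is what lets the Kconfig hunk below shrink HV_RAM_SIZE.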


@@ -164,10 +164,6 @@ config MALLOC_ALIGN
 	range 8 32
 	default 16
 
-config NUM_ALLOC_PAGES
-	hex "Capacity in pages of the heap for page_alloc()"
-	default 0x1000
-
 config HEAP_SIZE
 	hex "Capacity of the heap for malloc()"
 	default 0x100000
@@ -222,7 +218,7 @@ config HV_RAM_START
 
 config HV_RAM_SIZE
 	hex "Size of the RAM region used by the hypervisor"
-	default 0x06000000
+	default 0x04800000
 	help
 	  A 64-bit integer indicating the size of RAM used by the hypervisor.
 	  It is ensured at link time that the footprint of the hypervisor
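
Size arithmetic behind the new default, for review: the deleted paging heap occupied CONFIG_NUM_ALLOC_PAGES * CPU_PAGE_SIZE = 0x1000 * 4 KiB = 16 MiB, while HV_RAM_SIZE drops by 0x06000000 - 0x04800000 = 0x1800000 = 24 MiB; the extra 8 MiB presumably reflects other static-allocation savings in this series rather than this heap alone.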


@@ -533,7 +533,7 @@ void cpu_secondary_init(void)
 	/* Switch this CPU to use the same page tables set-up by the
 	 * primary/boot CPU
 	 */
-	enable_paging(get_paging_pml4());
+	enable_paging();
 
 	enable_smep();


@@ -30,7 +30,7 @@
 #include <hypervisor.h>
 #include <reloc.h>
 
-static void *mmu_pml4_addr;
+static void *ppt_mmu_pml4_addr;
 static void *sanitized_page[CPU_PAGE_SIZE];
 
 static struct vmx_capability {
@@ -206,13 +206,7 @@ void sanitize_pte(uint64_t *pt_page)
 	}
 }
 
-uint64_t get_paging_pml4(void)
-{
-	/* Return address to caller */
-	return hva2hpa(mmu_pml4_addr);
-}
-
-void enable_paging(uint64_t pml4_base_addr)
+void enable_paging(void)
 {
 	uint64_t tmp64 = 0UL;
@@ -220,7 +214,7 @@ void enable_paging(void)
 	CPU_CR_READ(cr0, &tmp64);
 	CPU_CR_WRITE(cr0, tmp64 | CR0_WP);
 
-	CPU_CR_WRITE(cr3, pml4_base_addr);
+	CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
 }
 
 void enable_smep(void)
@@ -240,12 +234,12 @@ void init_paging(void)
 	uint32_t i;
 	uint64_t low32_max_ram = 0UL;
 	uint64_t high64_max_ram;
-	uint64_t attr_uc = (PAGE_TABLE | PAGE_CACHE_UC);
+	uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC);
 
 	pr_dbg("HV MMU Initialization");
 
 	/* Allocate memory for Hypervisor PML4 table */
-	mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
+	ppt_mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
 
 	init_e820();
 	obtain_e820_mem_info();
@@ -259,7 +253,7 @@ void init_paging(void)
 	}
 
 	/* Map all memory regions to UC attribute */
-	mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
+	mmu_add((uint64_t *)ppt_mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
 		high64_max_ram - e820_mem.mem_bottom, attr_uc, &ppt_mem_ops);
 
 	/* Modify WB attribute for E820_TYPE_RAM */
@@ -275,49 +269,28 @@ void init_paging(void)
 		}
 	}
 
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
 			PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
 			PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
 
 	/* set the paging-structure entries' U/S flag
 	 * to supervisor-mode for hypervisor owned memroy.
 	 */
 	hv_hpa = get_hv_image_base();
-	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa & PDE_MASK,
+	mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
 			CONFIG_HV_RAM_SIZE + ((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL,
 			PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
 			&ppt_mem_ops, MR_MODIFY);
 
 	/* Enable paging */
-	enable_paging(hva2hpa(mmu_pml4_addr));
+	enable_paging();
 
 	/* set ptep in sanitized_page point to itself */
 	sanitize_pte((uint64_t *)sanitized_page);
 }
 
-void *alloc_paging_struct(void)
-{
-	void *ptr = NULL;
-
-	/* Allocate a page from Hypervisor heap */
-	ptr = alloc_page();
-	ASSERT(ptr != NULL, "page alloc failed!");
-	(void)memset(ptr, 0U, CPU_PAGE_SIZE);
-
-	return ptr;
-}
-
-void free_paging_struct(void *ptr)
-{
-	if (ptr != NULL) {
-		(void)memset(ptr, 0U, CPU_PAGE_SIZE);
-		free(ptr);
-	}
-}
-
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
 {
 	uint64_t curr_hpa;
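
The shape of the mmu.c change: the PML4 root becomes file-private state, and enable_paging() derives the CR3 value itself instead of taking it as a parameter, so callers such as cpu_secondary_init() above cannot hand it a stale or wrong root. A standalone sketch of the pattern, with hypothetical write_cr3()/virt_to_phys() stand-ins for the CPU_CR_WRITE(cr3, ...) and hva2hpa() used in the diff:

    #include <stdint.h>

    static void *pml4_root;    /* file-private, set up during paging init */

    static uint64_t virt_to_phys(const void *hva)
    {
            return (uint64_t)hva;    /* identity mapping, sketch only */
    }

    static void write_cr3(uint64_t pa)
    {
            /* privileged instruction: only executable in ring 0 */
            __asm__ volatile ("mov %0, %%cr3" : : "r"(pa) : "memory");
    }

    void enable_paging_sketch(void)
    {
            /* no parameter: the root is module-private state */
            write_cr3(virt_to_phys(pml4_root));
    }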


@@ -134,10 +134,10 @@ struct iommu_domain {
 };
 
 struct context_table {
-	struct cpu_page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
+	struct page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
 };
 
-static struct cpu_page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
+static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
 static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
 
 static inline uint8_t*
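
struct cpu_page (deleted from mmu.h below) is replaced here by struct page, whose definition is not part of this diff. Given the unchanged __aligned(CPU_PAGE_SIZE) usage, it is presumably the same page-sized byte container, something like:

    /* Assumed shape of the replacement type; the actual definition
     * lives in a header outside this diff. */
    struct page {
            uint8_t contents[CPU_PAGE_SIZE];
    };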


@@ -67,16 +67,9 @@ enum _page_table_level {
 #define PAGE_SIZE_2M	MEM_2M
 #define PAGE_SIZE_1G	MEM_1G
 
-struct cpu_page {
-	uint8_t contents[CPU_PAGE_SIZE];
-};
-
 void sanitize_pte_entry(uint64_t *ptep);
 void sanitize_pte(uint64_t *pt_page);
-uint64_t get_paging_pml4(void);
-void *alloc_paging_struct(void);
-void free_paging_struct(void *ptr);
-void enable_paging(uint64_t pml4_base_addr);
+void enable_paging(void);
 void enable_smep(void);
 void init_paging(void);
 void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,


@@ -25,9 +25,6 @@
 #define PAGE_CACHE_UC_MINUS	PAGE_PCD
 #define PAGE_CACHE_UC		(PAGE_PCD | PAGE_PWT)
 
-#define PAGE_TABLE	(PAGE_PRESENT | PAGE_RW | PAGE_USER)
-
 #define EPT_RD	(1UL << 0U)
 #define EPT_WR	(1UL << 1U)
 #define EPT_EXE	(1UL << 2U)


@@ -24,8 +24,6 @@ struct mem_pool {
 /* APIs exposing memory allocation/deallocation abstractions */
 void *malloc(unsigned int num_bytes);
 void *calloc(unsigned int num_elements, unsigned int element_size);
-void *alloc_page(void);
-void *alloc_pages(unsigned int page_num);
 void free(const void *ptr);
 
 #endif /* MEM_MGT_H */


@@ -31,30 +31,6 @@ static struct mem_pool Memory_Pool = {
 	.contiguity_bitmap = Malloc_Heap_Contiguity_Bitmap
 };
 
-/************************************************************************/
-/* Memory pool declaration (block size = CPU_PAGE_SIZE)                 */
-/************************************************************************/
-static uint8_t __bss_noinit
-Paging_Heap[CONFIG_NUM_ALLOC_PAGES][CPU_PAGE_SIZE] __aligned(CPU_PAGE_SIZE);
-
-#define PAGING_HEAP_BUFF_SIZE	CPU_PAGE_SIZE
-#define PAGING_HEAP_TOTAL_BUFF	CONFIG_NUM_ALLOC_PAGES
-#define PAGING_HEAP_BITMAP_SIZE \
-	INT_DIV_ROUNDUP(PAGING_HEAP_TOTAL_BUFF, BITMAP_WORD_SIZE)
-static uint32_t Paging_Heap_Bitmap[PAGING_HEAP_BITMAP_SIZE];
-static uint32_t Paging_Heap_Contiguity_Bitmap[MALLOC_HEAP_BITMAP_SIZE];
-
-static struct mem_pool Paging_Memory_Pool = {
-	.start_addr = Paging_Heap,
-	.spinlock = {.head = 0U, .tail = 0U},
-	.size = CONFIG_NUM_ALLOC_PAGES * CPU_PAGE_SIZE,
-	.buff_size = PAGING_HEAP_BUFF_SIZE,
-	.total_buffs = PAGING_HEAP_TOTAL_BUFF,
-	.bmp_size = PAGING_HEAP_BITMAP_SIZE,
-	.bitmap = Paging_Heap_Bitmap,
-	.contiguity_bitmap = Paging_Heap_Contiguity_Bitmap
-};
-
 static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
 {
@@ -247,11 +223,6 @@ void *malloc(unsigned int num_bytes)
 		 * Request memory allocation from smaller segmented memory pool
 		 */
 		memory = allocate_mem(&Memory_Pool, num_bytes);
-	} else {
-		uint32_t page_num =
-			((num_bytes + CPU_PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT;
-		/* Request memory allocation through alloc_page */
-		memory = alloc_pages(page_num);
 	}
 
 	/* Check if memory allocation is successful */
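
Note the behavioral change in this hunk: a request too large for the small-block heap can no longer fall back to the page pool, so judging by the remaining code path, malloc() simply reports failure. A hypothetical caller after this commit:

    /* Sketch: large requests are now unsatisfiable by design, so the
     * NULL check is the only safety net. */
    void *buf = malloc(2U * 1024U * 1024U);    /* 2 MiB request */
    if (buf == NULL) {
            pr_err("large allocation unsupported after alloc_page() removal");
    }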
@@ -263,26 +234,6 @@ void *malloc(unsigned int num_bytes)
 	return memory;
 }
 
-void *alloc_pages(unsigned int page_num)
-{
-	void *memory = NULL;
-
-	/* Request memory allocation from Page-aligned memory pool */
-	memory = allocate_mem(&Paging_Memory_Pool, page_num * CPU_PAGE_SIZE);
-
-	/* Check if memory allocation is successful */
-	if (memory == NULL) {
-		pr_err("%s: failed to alloc %d pages", __func__, page_num);
-	}
-
-	return memory;
-}
-
-void *alloc_page(void)
-{
-	return alloc_pages(1U);
-}
-
 void *calloc(unsigned int num_elements, unsigned int element_size)
 {
 	void *memory = malloc(num_elements * element_size);
@@ -306,14 +257,6 @@ void free(const void *ptr)
 		/* Free buffer in 16-Bytes aligned Memory Pool */
 		deallocate_mem(&Memory_Pool, ptr);
 	}
-	/* Check if ptr belongs to page aligned Memory Pool */
-	else if ((Paging_Memory_Pool.start_addr < ptr) &&
-		(ptr < (Paging_Memory_Pool.start_addr +
-		(Paging_Memory_Pool.total_buffs *
-		 Paging_Memory_Pool.buff_size)))) {
-		/* Free buffer in page aligned Memory Pool */
-		deallocate_mem(&Paging_Memory_Pool, ptr);
-	}
 }
 
 void *memchr(const void *void_s, int c, size_t n)