hv: mmu: replace dynamic memory allocation in memory management

Replace dynamic memory allocation in memory management with static
memory allocation. Since static memory allocation guarantees that an
allocation never fails, split_large_page and construct_pgentry no
longer need to return an errno. Besides, destroy_ept no longer needs
to free pages; instead, it memsets the eptp to 0 to make sure this
paging table can't be walked again.

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Author:     Li, Fei1
Date:       2018-10-30 05:55:57 +08:00
Committer:  lijinxia
Parent:     9c7c0de08f
Commit:     0391f84c83

13 changed files with 147 additions and 236 deletions
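
In code terms, the approach described in the commit message works out roughly as in the sketch below: page-table pages come from a pool whose size is fixed at build time, so the allocator can be written so that it never fails, and tearing down a table only needs to wipe the root page rather than walk and free the hierarchy. All names here (PT_POOL_SIZE, pt_pool, alloc_pt_page, destroy_root_table) and the pool size are illustrative assumptions, not the identifiers this series actually introduces.

/* Minimal sketch of the static-allocation idea; not ACRN code. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE	4096U
#define PT_POOL_SIZE	256U	/* worst-case demand, computed at build time */

struct page {
	uint8_t contents[PAGE_SIZE];
};

static struct page pt_pool[PT_POOL_SIZE];	/* static pool, never freed */
static uint32_t pt_pool_used;

/* Cannot fail as long as PT_POOL_SIZE covers the worst case, so callers
 * such as split_large_page/construct_pgentry need no errno return path. */
static struct page *alloc_pt_page(void)
{
	assert(pt_pool_used < PT_POOL_SIZE);
	return &pt_pool[pt_pool_used++];
}

/* Tear-down: instead of freeing pages back to a heap, zero the root page
 * so the table can never be walked again; the backing pages simply stay
 * in the static pool. */
static void destroy_root_table(struct page *root)
{
	(void)memset(root->contents, 0, PAGE_SIZE);
}

int main(void)
{
	struct page *pml4 = alloc_pt_page();	/* never NULL */
	destroy_root_table(pml4);
	return 0;
}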


@@ -35,6 +35,7 @@
 #ifndef ASSEMBLER
 #include <cpu.h>
+#include <page.h>
 
 /* Define cache line size (in bytes) */
 #define CACHE_LINE_SIZE 64U
@@ -53,11 +54,6 @@ static inline uint64_t round_page_down(uint64_t addr)
 	return (addr & CPU_PAGE_MASK);
 }
 
-enum _page_table_type {
-	PTT_PRIMARY = 0,	/* Mapping for hypervisor */
-	PTT_EPT = 1,
-};
-
 /* Represent the 4 levels of translation tables in IA-32e paging mode */
 enum _page_table_level {
 	IA32E_PML4 = 0,
@@ -83,13 +79,10 @@ void free_paging_struct(void *ptr);
 void enable_paging(uint64_t pml4_base_addr);
 void enable_smep(void);
 void init_paging(void);
-void mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
-		uint64_t vaddr_base, uint64_t size,
-		uint64_t prot, enum _page_table_type ptt);
-void mmu_modify_or_del(uint64_t *pml4_page,
-		uint64_t vaddr_base, uint64_t size,
-		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt, uint32_t type);
+void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,
+		uint64_t size, uint64_t prot, const struct memory_ops *mem_ops);
+void mmu_modify_or_del(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
+		uint64_t prot_set, uint64_t prot_clr, const struct memory_ops *mem_ops, uint32_t type);
 int check_vmx_mmu_cap(void);
 uint16_t allocate_vpid(void);
 void flush_vpid_single(uint16_t vpid);
@@ -100,7 +93,7 @@ bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg);
  *@pre (pml4_page != NULL) && (pg_size != NULL)
  */
 uint64_t *lookup_address(uint64_t *pml4_page, uint64_t addr,
-		uint64_t *pg_size, enum _page_table_type ptt);
+		uint64_t *pg_size, const struct memory_ops *mem_ops);
 
 #pragma pack(1)
@@ -166,7 +159,6 @@
  */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size);
-void free_ept_mem(uint64_t *pml4_page);
 
 int ept_violation_vmexit_handler(struct vcpu *vcpu);
 int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);

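With the _page_table_type enum gone, the walking code can no longer branch on PTT_PRIMARY versus PTT_EPT; instead every caller hands in a const struct memory_ops * that bundles the per-table-type behaviour (and, per the commit message, the statically allocated pages backing that table). The sketch below shows the rough shape such an ops structure could take, and where the pgentry_present() helper removed from pgtable.h (last hunk below) naturally ends up; the struct layout and names here are assumptions for illustration, not the definition this commit adds to page.h.

/* Illustrative only: a per-table-type ops table replacing the ptt enum. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT	(1UL << 0U)	/* IA-32e: bit 0 is Present */
#define EPT_RWX		0x7UL		/* EPT: any of Read/Write/Execute */

struct memory_ops_sketch {
	/* "is this entry present?" differs between MMU and EPT entries */
	uint64_t (*pgentry_present)(uint64_t pte);
	/* a real ops table would also hand out the statically allocated
	 * PML4/PDPT/PD/PT pages that back this particular page table */
};

static uint64_t ppt_pgentry_present(uint64_t pte)
{
	return pte & PAGE_PRESENT;
}

static uint64_t ept_pgentry_present(uint64_t pte)
{
	return pte & EPT_RWX;
}

static const struct memory_ops_sketch ppt_ops = { ppt_pgentry_present };
static const struct memory_ops_sketch ept_ops = { ept_pgentry_present };

/* A walker no longer takes a ptt argument; it just calls through the ops. */
static int entry_is_present(const struct memory_ops_sketch *mem_ops, uint64_t pte)
{
	return (mem_ops->pgentry_present(pte) != 0UL) ? 1 : 0;
}

int main(void)
{
	/* an entry with only bit 1 set: not Present for the MMU check,
	 * but counted as mapped by the EPT RWX check */
	printf("%d %d\n", entry_is_present(&ppt_ops, 0x2UL),
		entry_is_present(&ept_ops, 0x2UL));	/* prints "0 1" */
	return 0;
}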

@@ -13,6 +13,21 @@
 /* size of the low MMIO address space: 2GB */
 #define PLATFORM_LO_MMIO_SIZE	0x80000000UL
 
+#define PML4_PAGE_NUM(size)	1UL
+#define PDPT_PAGE_NUM(size)	(((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
+#define PD_PAGE_NUM(size)	(((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
+#define PT_PAGE_NUM(size)	(((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)
+
+/* The size of the guest physical address space, covered by the EPT page table of a VM */
+#define EPT_ADDRESS_SPACE(size)	((size != 0UL) ? (size + PLATFORM_LO_MMIO_SIZE) : 0UL)
+
+#define TRUSTY_PML4_PAGE_NUM(size)	(1UL)
+#define TRUSTY_PDPT_PAGE_NUM(size)	(1UL)
+#define TRUSTY_PD_PAGE_NUM(size)	(PD_PAGE_NUM(size))
+#define TRUSTY_PT_PAGE_NUM(size)	(PT_PAGE_NUM(size))
+#define TRUSTY_PGTABLE_PAGE_NUM(size)	\
+	(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))
+
 struct page {
 	uint8_t contents[PAGE_SIZE];
 } __aligned(PAGE_SIZE);

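The new *_PAGE_NUM macros are what makes build-time sizing possible: for a given address-space size they round up to the number of PDPT, PD and PT pages needed (one PDPT page per 512 GiB of coverage, one PD page per 1 GiB, one PT page per 2 MiB). A standalone worked example follows; the shift/size constants are the standard IA-32e values assumed here for illustration, not copied from the hypervisor's headers. For an assumed 4 GiB guest, EPT_ADDRESS_SPACE adds the 2 GiB low-MMIO hole, and the macros come out to 1 PML4 + 1 PDPT + 6 PD + 3072 PT pages, roughly 12 MiB that can be reserved statically instead of allocated on demand.

/* Worked example only; constants assumed from IA-32e paging. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PML4E_SHIFT	39U			/* one PML4E covers 512 GiB */
#define PML4E_SIZE	(UINT64_C(1) << PML4E_SHIFT)
#define PDPTE_SHIFT	30U			/* one PDPTE covers 1 GiB */
#define PDPTE_SIZE	(UINT64_C(1) << PDPTE_SHIFT)
#define PDE_SHIFT	21U			/* one PDE covers 2 MiB */
#define PDE_SIZE	(UINT64_C(1) << PDE_SHIFT)

#define PLATFORM_LO_MMIO_SIZE	UINT64_C(0x80000000)	/* 2 GiB */

#define PML4_PAGE_NUM(size)	UINT64_C(1)
#define PDPT_PAGE_NUM(size)	(((size) + PML4E_SIZE - UINT64_C(1)) >> PML4E_SHIFT)
#define PD_PAGE_NUM(size)	(((size) + PDPTE_SIZE - UINT64_C(1)) >> PDPTE_SHIFT)
#define PT_PAGE_NUM(size)	(((size) + PDE_SIZE - UINT64_C(1)) >> PDE_SHIFT)
#define EPT_ADDRESS_SPACE(size)	(((size) != UINT64_C(0)) ? ((size) + PLATFORM_LO_MMIO_SIZE) : UINT64_C(0))

int main(void)
{
	uint64_t ram = UINT64_C(4) << 30U;		/* assume a 4 GiB guest */
	uint64_t space = EPT_ADDRESS_SPACE(ram);	/* 6 GiB to cover */
	uint64_t pages = PML4_PAGE_NUM(space) + PDPT_PAGE_NUM(space) +
			PD_PAGE_NUM(space) + PT_PAGE_NUM(space);

	/* prints "pages: 3080" -> about 12 MiB of 4 KiB page-table pages */
	printf("pages: %" PRIu64 "\n", pages);
	return 0;
}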

@@ -162,9 +162,4 @@ static inline uint64_t pdpte_large(uint64_t pdpte)
 	return pdpte & PAGE_PSE;
 }
 
-static inline uint64_t pgentry_present(enum _page_table_type ptt, uint64_t pte)
-{
-	return (ptt == PTT_PRIMARY) ? (pte & PAGE_PRESENT) : (pte & EPT_RWX);
-}
-
 #endif /* PGTABLE_H */