HV: improve e820 interfaces and their usages

1. change its APIs to be more independent, and modify the callers' code
2. limit its global variables as static, and return const to the callers
3. remove unused code in "CONFIG_CMA"

Tracked-On: #1842
Signed-off-by: Minggui Cao <minggui.cao@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Minggui Cao 2018-12-07 16:49:05 +08:00 committed by wenlingz
parent b69d24b1d4
commit 9b58b9d11d
8 changed files with 93 additions and 93 deletions

View File

@ -382,6 +382,7 @@ void bsp_boot_init(void)
load_cpu_state_data();
/* Initialize the hypervisor paging */
init_e820();
init_paging();
if (!cpu_has_cap(X86_FEATURE_X2APIC)) {

View File

@ -9,14 +9,18 @@
#include <multiboot.h>
#include <e820.h>
/* for VM0 e820 */
uint32_t e820_entries;
struct e820_entry e820[E820_MAX_ENTRIES];
struct e820_mem_params e820_mem;
/*
* e820.c contains the related e820 operations; like HV to get memory info for its MMU setup;
* and hide HV memory from VM0...
*/
static uint32_t e820_entries;
static struct e820_entry e820[E820_MAX_ENTRIES];
static struct e820_mem_params e820_mem;
#define ACRN_DBG_E820 6U
void obtain_e820_mem_info(void)
static void obtain_e820_mem_info(void)
{
uint32_t i;
struct e820_entry *entry;
@ -47,6 +51,7 @@ void obtain_e820_mem_info(void)
}
}
/* before boot vm0(service OS), call it to hide the HV RAM entry in e820 table from vm0 */
void rebuild_vm0_e820(void)
{
uint32_t i;
@ -111,6 +116,7 @@ void rebuild_vm0_e820(void)
e820_mem.total_mem_size -= CONFIG_HV_RAM_SIZE;
}
/* get some RAM below 1MB in e820 entries, hide it from vm0, return its start address */
uint64_t e820_alloc_low_memory(uint32_t size_arg)
{
uint32_t i;
@ -162,6 +168,7 @@ uint64_t e820_alloc_low_memory(uint32_t size_arg)
return ACRN_INVALID_HPA;
}
/* HV read multiboot header to get e820 entries info and calc total RAM info */
void init_e820(void)
{
uint32_t i;
@ -193,11 +200,27 @@ void init_e820(void)
}
}
obtain_e820_mem_info();
} else {
panic("no multiboot info found");
}
}
/* Report how many entries are currently stored in the cached e820 table. */
uint32_t get_e820_entries_count(void)
{
	uint32_t count = e820_entries;

	return count;
}
/* Expose a read-only pointer to the first entry of the cached e820 table. */
const struct e820_entry *get_e820_entry(void)
{
	return &e820[0];
}
/* Expose a read-only view of the aggregated e820 memory statistics. */
const struct e820_mem_params *get_e820_mem_info(void)
{
	const struct e820_mem_params *info = &e820_mem;

	return info;
}
#ifdef CONFIG_PARTITION_MODE
uint32_t create_e820_table(struct e820_entry *param_e820)
{

View File

@ -459,46 +459,42 @@ int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
* @pre vm != NULL
* @pre is_vm0(vm) == true
*/
int prepare_vm0_memmap_and_e820(struct acrn_vm *vm)
int prepare_vm0_memmap(struct acrn_vm *vm)
{
uint32_t i;
uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
struct e820_entry *entry;
uint64_t hv_hpa;
uint64_t *pml4_page = (uint64_t *)vm->arch_vm.nworld_eptp;
rebuild_vm0_e820();
dev_dbg(ACRN_DBG_GUEST,
"vm0: bottom memory - 0x%llx, top memory - 0x%llx\n",
e820_mem.mem_bottom, e820_mem.mem_top);
const struct e820_entry *entry;
uint32_t entries_count = get_e820_entries_count();
const struct e820_entry *p_e820 = get_e820_entry();
const struct e820_mem_params *p_e820_mem_info = get_e820_mem_info();
if (e820_mem.mem_top > EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE)) {
dev_dbg(ACRN_DBG_GUEST, "vm0: bottom memory - 0x%llx, top memory - 0x%llx\n",
p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_top);
if (p_e820_mem_info->mem_top > EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE)) {
panic("Please configure VM0_ADDRESS_SPACE correctly!\n");
}
/* create real ept map for all ranges with UC */
ept_mr_add(vm, pml4_page,
e820_mem.mem_bottom, e820_mem.mem_bottom,
(e820_mem.mem_top - e820_mem.mem_bottom),
attr_uc);
ept_mr_add(vm, pml4_page, p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_bottom,
(p_e820_mem_info->mem_top - p_e820_mem_info->mem_bottom), attr_uc);
/* update ram entries to WB attr */
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
if (entry->type == E820_TYPE_RAM) {
ept_mr_modify(vm, pml4_page,
entry->baseaddr, entry->length,
EPT_WB, EPT_MT_MASK);
ept_mr_modify(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);
}
}
dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
dev_dbg(ACRN_DBG_GUEST,
"e820 table: %d type: 0x%x", i, entry->type);
dev_dbg(ACRN_DBG_GUEST,
"BaseAddress: 0x%016llx length: 0x%016llx\n",
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
dev_dbg(ACRN_DBG_GUEST, "e820 table: %d type: 0x%x", i, entry->type);
dev_dbg(ACRN_DBG_GUEST, "BaseAddress: 0x%016llx length: 0x%016llx\n",
entry->baseaddr, entry->length);
}

View File

@ -98,7 +98,8 @@ int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
/* Only for SOS: Configure VM software information */
/* For UOS: This VM software information is configure in DM */
if (is_vm0(vm)) {
status = prepare_vm0_memmap_and_e820(vm);
rebuild_vm0_e820();
status = prepare_vm0_memmap(vm);
if (status != 0) {
goto err;
}

View File

@ -228,36 +228,35 @@ void enable_smep(void)
void init_paging(void)
{
struct e820_entry *entry;
uint64_t hv_hpa;
uint32_t i;
uint64_t low32_max_ram = 0UL;
uint64_t high64_max_ram;
uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC);
const struct e820_entry *entry;
uint32_t entries_count = get_e820_entries_count();
const struct e820_entry *p_e820 = get_e820_entry();
const struct e820_mem_params *p_e820_mem_info = get_e820_mem_info();
pr_dbg("HV MMU Initialization");
/* Allocate memory for Hypervisor PML4 table */
ppt_mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info);
init_e820();
obtain_e820_mem_info();
/* align to 2MB */
high64_max_ram = (e820_mem.mem_top + PDE_SIZE - 1UL) & PDE_MASK;
high64_max_ram = (p_e820_mem_info->mem_top + PDE_SIZE - 1UL) & PDE_MASK;
if ((high64_max_ram > (CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)) ||
(high64_max_ram < (1UL << 32U))) {
panic("Please configure HV_ADDRESS_SPACE correctly!\n");
}
/* Allocate memory for Hypervisor PML4 table */
ppt_mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info);
/* Map all memory regions to UC attribute */
mmu_add((uint64_t *)ppt_mmu_pml4_addr, 0UL, 0UL,
high64_max_ram - 0UL, attr_uc, &ppt_mem_ops);
mmu_add((uint64_t *)ppt_mmu_pml4_addr, 0UL, 0UL, high64_max_ram - 0UL, attr_uc, &ppt_mem_ops);
/* Modify WB attribute for E820_TYPE_RAM */
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
if (entry->type == E820_TYPE_RAM) {
if (entry->baseaddr < (1UL << 32U)) {
uint64_t end = entry->baseaddr + entry->length;
@ -281,12 +280,10 @@ void init_paging(void)
hv_hpa = get_hv_image_base();
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
CONFIG_HV_RAM_SIZE + (((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL),
PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
&ppt_mem_ops, MR_MODIFY);
PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER, &ppt_mem_ops, MR_MODIFY);
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (uint64_t)get_reserve_sworld_memory_base(),
TRUSTY_RAM_SIZE * (CONFIG_MAX_VM_NUM - 1U),
PAGE_USER, 0UL, &ppt_mem_ops, MR_MODIFY);
TRUSTY_RAM_SIZE * (CONFIG_MAX_VM_NUM - 1U), PAGE_USER, 0UL, &ppt_mem_ops, MR_MODIFY);
/* Enable paging */
enable_paging();

View File

@ -83,6 +83,7 @@ int general_sw_loader(struct acrn_vm *vm)
struct sw_linux *linux_info = &(vm->sw.linux_info);
struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
struct acrn_vcpu *vcpu = get_primary_vcpu(vm);
const struct e820_mem_params *p_e820_mem_info = get_e820_mem_info();
pr_dbg("Loading guest to run-time location");
@ -97,14 +98,11 @@ int general_sw_loader(struct acrn_vm *vm)
kernel_entry_offset += 512U;
}
sw_kernel->kernel_entry_addr =
(void *)((uint64_t)sw_kernel->kernel_load_addr
+ kernel_entry_offset);
sw_kernel->kernel_entry_addr = (void *)((uint64_t)sw_kernel->kernel_load_addr + kernel_entry_offset);
if (is_vcpu_bsp(vcpu)) {
/* Set VCPU entry point to kernel entry */
vcpu_set_rip(vcpu, (uint64_t)sw_kernel->kernel_entry_addr);
pr_info("%s, VM %hu VCPU %hu Entry: 0x%016llx ",
__func__, vm->vm_id, vcpu->vcpu_id,
pr_info("%s, VM %hu VCPU %hu Entry: 0x%016llx ", __func__, vm->vm_id, vcpu->vcpu_id,
sw_kernel->kernel_entry_addr);
}
@ -112,9 +110,7 @@ int general_sw_loader(struct acrn_vm *vm)
hva = gpa2hva(vm, (uint64_t)sw_kernel->kernel_load_addr);
/* Copy the guest kernel image to its run-time location */
(void)memcpy_s((void *)hva, sw_kernel->kernel_size,
sw_kernel->kernel_src_addr,
sw_kernel->kernel_size);
(void)memcpy_s((void *)hva, sw_kernel->kernel_size, sw_kernel->kernel_src_addr, sw_kernel->kernel_size);
/* See if guest is a Linux guest */
if (vm->sw.kernel_type == VM_LINUX_GUEST) {
@ -128,24 +124,11 @@ int general_sw_loader(struct acrn_vm *vm)
}
/* Get host-physical address for guest bootargs */
hva = gpa2hva(vm,
(uint64_t)linux_info->bootargs_load_addr);
hva = gpa2hva(vm, (uint64_t)linux_info->bootargs_load_addr);
/* Copy Guest OS bootargs to its load location */
(void)strcpy_s((char *)hva, MEM_2K,
linux_info->bootargs_src_addr);
(void)strcpy_s((char *)hva, MEM_2K, linux_info->bootargs_src_addr);
#ifdef CONFIG_CMA
/* add "cma=XXXXM@0xXXXXXXXX" to cmdline*/
if (is_vm0(vm) && (e820_mem.max_ram_blk_size > 0)) {
snprintf(dyn_bootargs, 100U, " cma=%dM@0x%llx",
(e820_mem.max_ram_blk_size >> 20U),
e820_mem.max_ram_blk_base);
(void)strcpy_s((char *)hva
+ linux_info->bootargs_size,
100U, dyn_bootargs);
}
#else
/* add "hugepagesz=1G hugepages=x" to cmdline for 1G hugepage
* reserving. Current strategy is "total_mem_size in Giga -
* remained 1G pages" for reserving.
@ -154,34 +137,24 @@ int general_sw_loader(struct acrn_vm *vm)
int32_t reserving_1g_pages;
#ifdef CONFIG_REMAIN_1G_PAGES
reserving_1g_pages = (e820_mem.total_mem_size >> 30U) -
CONFIG_REMAIN_1G_PAGES;
reserving_1g_pages = (p_e820_mem_info->total_mem_size >> 30U) - CONFIG_REMAIN_1G_PAGES;
#else
reserving_1g_pages = (e820_mem.total_mem_size >> 30U) -
3;
reserving_1g_pages = (p_e820_mem_info->total_mem_size >> 30U) - 3;
#endif
if (reserving_1g_pages > 0) {
snprintf(dyn_bootargs, 100U,
" hugepagesz=1G hugepages=%d",
reserving_1g_pages);
(void)strcpy_s((char *)hva
+ linux_info->bootargs_size,
100U, dyn_bootargs);
snprintf(dyn_bootargs, 100U, " hugepagesz=1G hugepages=%d", reserving_1g_pages);
(void)strcpy_s((char *)hva + linux_info->bootargs_size, 100U, dyn_bootargs);
}
}
#endif
/* Check if a RAM disk is present with Linux guest */
if (linux_info->ramdisk_src_addr != NULL) {
/* Get host-physical address for guest RAM disk */
hva = gpa2hva(vm,
(uint64_t)linux_info->ramdisk_load_addr);
hva = gpa2hva(vm, (uint64_t)linux_info->ramdisk_load_addr);
/* Copy RAM disk to its load location */
(void)memcpy_s((void *)hva,
linux_info->ramdisk_size,
linux_info->ramdisk_src_addr,
linux_info->ramdisk_size);
(void)memcpy_s((void *)hva, linux_info->ramdisk_size,
linux_info->ramdisk_src_addr, linux_info->ramdisk_size);
}
@ -191,8 +164,7 @@ int general_sw_loader(struct acrn_vm *vm)
vcpu_set_gpreg(vcpu, CPU_REG_RSI, create_zero_page(vm));
pr_info("%s, RSI pointing to zero page for VM %d at GPA %X",
__func__, vm->vm_id,
vcpu_get_gpreg(vcpu, CPU_REG_RSI));
__func__, vm->vm_id, vcpu_get_gpreg(vcpu, CPU_REG_RSI));
} else {
pr_err("%s, Loading VM SW failed", __func__);

View File

@ -12,17 +12,27 @@ struct e820_mem_params {
uint64_t max_ram_blk_size;
};
/* HV read multiboot header to get e820 entries info and calc total RAM info */
void init_e820(void);
void obtain_e820_mem_info(void);
/* before boot vm0(service OS), call it to hide the HV RAM entry in e820 table from vm0 */
void rebuild_vm0_e820(void);
/* get some RAM below 1MB in e820 entries, hide it from vm0, return its start address */
uint64_t e820_alloc_low_memory(uint32_t size_arg);
extern uint32_t e820_entries;
extern struct e820_entry e820[E820_MAX_ENTRIES];
extern struct e820_mem_params e820_mem;
/* copy the original e820 entries info to param_e820 */
uint32_t create_e820_table(struct e820_entry *param_e820);
/* get total number of the e820 entries */
uint32_t get_e820_entries_count(void);
/* get the e820 entries */
const struct e820_entry *get_e820_entry(void);
/* get the e820 total memory info */
const struct e820_mem_params *get_e820_mem_info(void);
#ifdef CONFIG_PARTITION_MODE
/*
* Default e820 mem map:

View File

@ -70,7 +70,7 @@
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
int prepare_vm0_memmap_and_e820(struct acrn_vm *vm);
int prepare_vm0_memmap(struct acrn_vm *vm);
/* Definition for a mem map lookup */
struct vm_lu_mem_map {
struct list_head list; /* EPT mem map lookup list*/