hv: mmu: add static paging table allocation for hypervisor

Add a static page-table allocation API for the hypervisor.
Note: PLATFORM_RAM_SIZE and PLATFORM_MMIO_SIZE must be configured to match
the platform exactly, since the static page-table pools are sized from these
values.

Rename RAM_START/RAM_SIZE to HV_RAM_START/HV_RAM_SIZE for the hypervisor.

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Author: Li, Fei1
Date: 2018-10-29 23:28:32 +08:00
Committed by: lijinxia
Parent: 74a5eec3a7
Commit: dc9d18a868
12 changed files with 163 additions and 39 deletions
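
The reason the platform sizes must match exactly is visible in the new page.c
further below: the per-level page pools are statically sized from
CONFIG_PLATFORM_RAM_SIZE plus the low-MMIO window, so an undersized value would
leave too few pages to cover the whole platform. As a rough sense of scale,
taking the 0x200000000 (8 GiB) PLATFORM_RAM_SIZE default and, purely for
illustration, assuming a 2 GiB low-MMIO window (the real PLATFORM_LO_MMIO_SIZE
is defined elsewhere in the tree), the sizing macros come out to 1 PML4 page,
1 PDPT page and 10 PD pages, i.e. 12 statically allocated 4 KiB pages. A
minimal standalone sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Architectural shift/size constants for the x86-64 paging levels. */
#define PML4E_SHIFT  39U                      /* one PML4 entry covers 512 GiB */
#define PDPTE_SHIFT  30U                      /* one PDPT entry covers 1 GiB   */
#define PML4E_SIZE   (1ULL << PML4E_SHIFT)
#define PDPTE_SIZE   (1ULL << PDPTE_SHIFT)

/* Same rounding-up logic as the *_PAGE_NUM macros added in page.c below. */
#define PML4_PAGE_NUM(size)  1ULL
#define PDPT_PAGE_NUM(size)  (((size) + PML4E_SIZE - 1ULL) >> PML4E_SHIFT)
#define PD_PAGE_NUM(size)    (((size) + PDPTE_SIZE - 1ULL) >> PDPTE_SHIFT)

int main(void)
{
    /* 8 GiB platform RAM (the new Kconfig default) plus an assumed 2 GiB
     * low-MMIO window; the real PLATFORM_LO_MMIO_SIZE lives elsewhere.
     */
    uint64_t size = 0x200000000ULL + 0x80000000ULL;

    printf("PML4 pages: %llu\n", (unsigned long long)PML4_PAGE_NUM(size)); /* 1  */
    printf("PDPT pages: %llu\n", (unsigned long long)PDPT_PAGE_NUM(size)); /* 1  */
    printf("PD pages:   %llu\n", (unsigned long long)PD_PAGE_NUM(size));   /* 10 */
    return 0;
}

Notably, the primary page-table pool below defines no 4 KiB PT pages at all,
which is consistent with the hypervisor mapping its address space in 2 MiB
chunks.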

@@ -208,25 +208,32 @@ config LOW_RAM_SIZE
       A 32-bit integer indicating the size of RAM region below address
       0x10000, starting from address 0x0.
 
-config RAM_START
-    hex "Address of the RAM region assigned to the hypervisor"
+config HV_RAM_START
+    hex "Start physical address of the RAM region used by the hypervisor"
     default 0x6e000000 if PLATFORM_SBL
     default 0x00100000 if PLATFORM_UEFI
     help
-      A 64-bit integer indicating the base address to where the hypervisor
+      A 64-bit integer indicating the base physical address to where the hypervisor
       should be loaded to. If RELOC is disabled the bootloader is required
       to load the hypervisor to this specific address. Otherwise the
       hypervisor will not boot. With RELOC enabled the hypervisor is capable
       of relocating its symbols to where it is placed at, and thus the
       bootloader may not place the hypervisor at this specific address.
 
-config RAM_SIZE
-    hex "Size of the RAM region assigned to the hypervisor"
-    default 0x02000000
+config HV_RAM_SIZE
+    hex "Size of the RAM region used by the hypervisor"
+    default 0x04000000
     help
-      A 64-bit integer indicating the size of RAM assigned to the
-      hypervisor. It is ensured at link time that the footprint of the
-      hypervisor does not exceed this size.
+      A 64-bit integer indicating the size of RAM used by the hypervisor.
+      It is ensured at link time that the footprint of the hypervisor
+      does not exceed this size.
+
+config PLATFORM_RAM_SIZE
+    hex "Size of the physical platform RAM"
+    default 0x200000000
+    help
+      A 64-bit integer indicating the size of the physical platform RAM
+      (not including the MMIO).
 
 config CONSTANT_ACPI
     bool "The platform ACPI info is constant"

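Two of the defaults above are easy to misread in hex: the hypervisor RAM
default doubles from 0x02000000 (32 MiB, the old RAM_SIZE default) to
0x04000000 (64 MiB), and the new PLATFORM_RAM_SIZE default of 0x200000000
corresponds to 8 GiB of platform RAM. A trivial standalone conversion, just to
make the magnitudes explicit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Defaults from the Kconfig hunk above, spelled out in MiB/GiB. */
    uint64_t old_hv_ram_size   = 0x02000000ULL;   /* previous RAM_SIZE default */
    uint64_t new_hv_ram_size   = 0x04000000ULL;   /* new HV_RAM_SIZE default   */
    uint64_t platform_ram_size = 0x200000000ULL;  /* PLATFORM_RAM_SIZE default */

    printf("old HV RAM size: %llu MiB\n", (unsigned long long)(old_hv_ram_size >> 20U));   /* 32 */
    printf("new HV RAM size: %llu MiB\n", (unsigned long long)(new_hv_ram_size >> 20U));   /* 64 */
    printf("platform RAM:    %llu GiB\n", (unsigned long long)(platform_ram_size >> 30U)); /* 8  */
    return 0;
}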

@@ -537,8 +537,8 @@ static void rebuild_vm0_e820(void)
     uint32_t i;
     uint64_t entry_start;
     uint64_t entry_end;
-    uint64_t hv_start = get_hv_image_base();
-    uint64_t hv_end = hv_start + CONFIG_RAM_SIZE;
+    uint64_t hv_start_pa = get_hv_image_base();
+    uint64_t hv_end_pa = hv_start_pa + CONFIG_HV_RAM_SIZE;
     struct e820_entry *entry, new_entry = {0};
 
     /* hypervisor mem need be filter out from e820 table
@@ -550,36 +550,36 @@ static void rebuild_vm0_e820(void)
         entry_end = entry->baseaddr + entry->length;
 
         /* No need handle in these cases*/
-        if ((entry->type != E820_TYPE_RAM) || (entry_end <= hv_start)
-                || (entry_start >= hv_end)) {
+        if ((entry->type != E820_TYPE_RAM) || (entry_end <= hv_start_pa)
+                || (entry_start >= hv_end_pa)) {
             continue;
         }
 
         /* filter out hv mem and adjust length of this entry*/
-        if ((entry_start < hv_start) && (entry_end <= hv_end)) {
-            entry->length = hv_start - entry_start;
+        if ((entry_start < hv_start_pa) && (entry_end <= hv_end_pa)) {
+            entry->length = hv_start_pa - entry_start;
             continue;
         }
 
         /* filter out hv mem and need to create a new entry*/
-        if ((entry_start < hv_start) && (entry_end > hv_end)) {
-            entry->length = hv_start - entry_start;
-            new_entry.baseaddr = hv_end;
-            new_entry.length = entry_end - hv_end;
+        if ((entry_start < hv_start_pa) && (entry_end > hv_end_pa)) {
+            entry->length = hv_start_pa - entry_start;
+            new_entry.baseaddr = hv_end_pa;
+            new_entry.length = entry_end - hv_end_pa;
             new_entry.type = E820_TYPE_RAM;
             continue;
         }
 
         /* This entry is within the range of hv mem
          * change to E820_TYPE_RESERVED
          */
-        if ((entry_start >= hv_start) && (entry_end <= hv_end)) {
+        if ((entry_start >= hv_start_pa) && (entry_end <= hv_end_pa)) {
             entry->type = E820_TYPE_RESERVED;
             continue;
         }
 
-        if ((entry_start >= hv_start) && (entry_start < hv_end)
-                && (entry_end > hv_end)) {
-            entry->baseaddr = hv_end;
-            entry->length = entry_end - hv_end;
+        if ((entry_start >= hv_start_pa) && (entry_start < hv_end_pa)
+                && (entry_end > hv_end_pa)) {
+            entry->baseaddr = hv_end_pa;
+            entry->length = entry_end - hv_end_pa;
             continue;
         }
@@ -595,7 +595,7 @@ static void rebuild_vm0_e820(void)
         entry->type = new_entry.type;
     }
 
-    e820_mem.total_mem_size -= CONFIG_RAM_SIZE;
+    e820_mem.total_mem_size -= CONFIG_HV_RAM_SIZE;
 }
 
 /**
@@ -649,7 +649,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
      * will cause EPT violation if sos accesses hv memory
      */
     hv_hpa = get_hv_image_base();
-    ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_RAM_SIZE);
+    ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
 
     return 0;
 }

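As a concrete illustration of the case handling above, using the SBL defaults
from the Kconfig hunk purely as example numbers: if an e820 RAM entry covers
[0x0, 0x80000000) and the hypervisor occupies [0x6e000000, 0x72000000)
(HV_RAM_START plus HV_RAM_SIZE), the "create a new entry" branch applies: the
existing entry is trimmed to end at 0x6e000000 and a new RAM entry covering
[0x72000000, 0x80000000) is recorded. A minimal standalone sketch of that
split, with a stand-in struct that only mirrors the fields used here:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the hypervisor's struct e820_entry (fields used here only). */
struct e820_entry {
    uint64_t baseaddr;
    uint64_t length;
    uint32_t type;                    /* 1U stands in for E820_TYPE_RAM */
};

int main(void)
{
    struct e820_entry entry = { 0x0UL, 0x80000000UL, 1U };   /* RAM: [0, 2 GiB) */
    struct e820_entry new_entry = { 0UL, 0UL, 0U };
    uint64_t hv_start_pa = 0x6e000000UL;                     /* HV_RAM_START    */
    uint64_t hv_end_pa   = hv_start_pa + 0x04000000UL;       /* + HV_RAM_SIZE   */
    uint64_t entry_end   = entry.baseaddr + entry.length;

    /* The "filter out hv mem and need to create a new entry" case. */
    if ((entry.baseaddr < hv_start_pa) && (entry_end > hv_end_pa)) {
        entry.length = hv_start_pa - entry.baseaddr;         /* [0, 0x6e000000)          */
        new_entry.baseaddr = hv_end_pa;                      /* [0x72000000, 0x80000000) */
        new_entry.length = entry_end - hv_end_pa;
        new_entry.type = 1U;
    }

    printf("trimmed entry: base=0x%llx len=0x%llx\n",
           (unsigned long long)entry.baseaddr, (unsigned long long)entry.length);
    printf("new entry:     base=0x%llx len=0x%llx\n",
           (unsigned long long)new_entry.baseaddr, (unsigned long long)new_entry.length);
    return 0;
}

The CONFIG_HV_RAM_SIZE bytes carved out of the e820 here are the same range
that prepare_vm0_memmap_and_e820() removes from the SOS EPT with ept_mr_del(),
so the service OS can neither see nor map hypervisor memory.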

@@ -268,7 +268,7 @@ void init_paging(void)
      * to supervisor-mode for hypervisor owned memory.
      */
     hv_hpa = get_hv_image_base();
-    mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_RAM_SIZE,
+    mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_HV_RAM_SIZE,
             PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
             PTT_PRIMARY, MR_MODIFY);

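The mmu_modify_or_del() call above passes PAGE_CACHE_WB as the set mask and
PAGE_CACHE_MASK | PAGE_USER as the clear mask, so the already-mapped hypervisor
range is forced to write-back caching and stripped of user-mode access. A
minimal sketch of that kind of attribute rewrite on a single leaf entry,
assuming MR_MODIFY means "clear the clear-mask bits, then set the set-mask
bits", and using illustrative bit values rather than the hypervisor's real
header definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit values only; the real ones come from the mmu headers. */
#define PAGE_PRESENT    (1UL << 0U)
#define PAGE_RW         (1UL << 1U)
#define PAGE_USER       (1UL << 2U)
#define PAGE_PWT        (1UL << 3U)
#define PAGE_PCD        (1UL << 4U)
#define PAGE_CACHE_MASK (PAGE_PWT | PAGE_PCD)
#define PAGE_CACHE_WB   0UL                   /* write-back: PWT and PCD clear */

/* Assumed MR_MODIFY behaviour on a leaf entry: clear, then set. */
static uint64_t modify_leaf_entry(uint64_t pte, uint64_t prot_set, uint64_t prot_clr)
{
    return (pte & ~prot_clr) | prot_set;
}

int main(void)
{
    /* A leaf entry that is currently user-accessible with caching disabled. */
    uint64_t pte = 0x6e000000UL | PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_PCD;

    /* Same set/clear masks as the init_paging() call above. */
    uint64_t new_pte = modify_leaf_entry(pte, PAGE_CACHE_WB,
                                         PAGE_CACHE_MASK | PAGE_USER);

    printf("old pte: 0x%llx\n", (unsigned long long)pte);     /* low bits 0x17 */
    printf("new pte: 0x%llx\n", (unsigned long long)new_pte); /* low bits 0x03 */
    return 0;
}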

@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <hypervisor.h>
+
+#define PML4_PAGE_NUM(size)  1UL
+#define PDPT_PAGE_NUM(size)  (((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
+#define PD_PAGE_NUM(size)    (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
+#define PT_PAGE_NUM(size)    (((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)
+
+#define DEFINE_PGTABLE_PAGE(prefix, lvl, LVL, size)  \
+static struct page prefix ## lvl ## _pages[LVL ## _PAGE_NUM(size)]
+
+DEFINE_PGTABLE_PAGE(ppt_, pml4, PML4, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
+DEFINE_PGTABLE_PAGE(ppt_, pdpt, PDPT, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
+DEFINE_PGTABLE_PAGE(ppt_, pd, PD, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
+
+/* ppt: primary page table */
+static union pgtable_pages_info ppt_pages_info = {
+    .ppt = {
+        .pml4_base = ppt_pml4_pages,
+        .pdpt_base = ppt_pdpt_pages,
+        .pd_base = ppt_pd_pages,
+    }
+};
+
+static inline uint64_t ppt_get_default_access_right(void)
+{
+    return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
+}
+
+static inline uint64_t ppt_pgentry_present(uint64_t pte)
+{
+    return pte & PAGE_PRESENT;
+}
+
+static inline struct page *ppt_get_pml4_page(const union pgtable_pages_info *info, __unused uint64_t gpa)
+{
+    struct page *page = info->ppt.pml4_base;
+    (void)memset(page, 0U, PAGE_SIZE);
+    return page;
+}
+
+static inline struct page *ppt_get_pdpt_page(const union pgtable_pages_info *info, uint64_t gpa)
+{
+    struct page *page = info->ppt.pdpt_base + (gpa >> PML4E_SHIFT);
+    (void)memset(page, 0U, PAGE_SIZE);
+    return page;
+}
+
+static inline struct page *ppt_get_pd_page(const union pgtable_pages_info *info, uint64_t gpa)
+{
+    struct page *page = info->ppt.pd_base + (gpa >> PDPTE_SHIFT);
+    (void)memset(page, 0U, PAGE_SIZE);
+    return page;
+}
+
+const struct memory_ops ppt_mem_ops = {
+    .info = &ppt_pages_info,
+    .get_default_access_right = ppt_get_default_access_right,
+    .pgentry_present = ppt_pgentry_present,
+    .get_pml4_page = ppt_get_pml4_page,
+    .get_pdpt_page = ppt_get_pdpt_page,
+    .get_pd_page = ppt_get_pd_page,
+};
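
The get_*_page() helpers above are what turn the flat static arrays into an
indexed pool: the PDPT page serving a given address is element
(addr >> PML4E_SHIFT) of ppt_pdpt_pages, and the PD page is element
(addr >> PDPTE_SHIFT) of ppt_pd_pages, which is exactly why the arrays are
sized with PDPT_PAGE_NUM()/PD_PAGE_NUM() over the whole platform range. A small
standalone illustration of that index math (the shift values are the
architectural ones; the addresses are arbitrary examples):

#include <stdio.h>
#include <stdint.h>

#define PML4E_SHIFT 39U   /* 512 GiB per PML4 entry */
#define PDPTE_SHIFT 30U   /*   1 GiB per PDPT entry */

int main(void)
{
    /* Example physical addresses, chosen purely for illustration. */
    uint64_t addrs[] = { 0x00100000ULL, 0x6e000000ULL, 0x180000000ULL };
    unsigned int i;

    for (i = 0U; i < (unsigned int)(sizeof(addrs) / sizeof(addrs[0])); i++) {
        /* Same index math as ppt_get_pdpt_page()/ppt_get_pd_page(): the
         * array slot is the number of 512 GiB (resp. 1 GiB) regions that
         * lie below the address.
         */
        printf("addr 0x%llx -> ppt_pdpt_pages[%llu], ppt_pd_pages[%llu]\n",
               (unsigned long long)addrs[i],
               (unsigned long long)(addrs[i] >> PML4E_SHIFT),
               (unsigned long long)(addrs[i] >> PDPTE_SHIFT));
    }
    return 0;
}

The memory_ops indirection presumably lets the generic page-table walking code
stay agnostic about where its backing pages come from; ppt_mem_ops is simply
the statically allocated flavor used for the hypervisor's own (primary) page
table.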