Mirror of https://github.com/projectacrn/acrn-hypervisor.git
Currently ept_pages_info[] is initialized with its first element only, which forces the VM with id 0 to use the SOS EPT pages. This is incorrect for the logical partition and hybrid scenarios. Since SOS_RAM_SIZE and UOS_RAM_SIZE are configured separately, we should use different EPT pages accordingly. So the PRE_VM_NUM, SOS_VM_NUM and MAX_POST_VM_NUM macros are introduced to resolve this issue; these macros are generated by the acrn-config tool when the user configures ACRN for a specific scenario. In addition, when UOS_RAM_SIZE is less than 2GB, the EPT address range should be (4G + PLATFORM_HI_MMIO_SIZE).

Tracked-On: #4458
Signed-off-by: Victor Sun <victor.sun@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
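As an illustration of the fix, here is a minimal sketch (not the actual ACRN source) of how per-VM-class EPT page pools can be sized with the helpers defined in the header below. SOS_RAM_SIZE, UOS_RAM_SIZE and MAX_POST_VM_NUM are assumed to be configuration macros generated by acrn-config, VM_EPT_PGTABLE_PAGE_NUM is a hypothetical helper, and the per-level sizes (PML4E_SIZE etc.) are assumed to come from ACRN's page-table definitions elsewhere.

/* Sketch: total EPT page-table pages needed to cover one VM's guest
 * physical address space, derived from EPT_ADDRESS_SPACE() below. */
#define VM_EPT_PGTABLE_PAGE_NUM(ram_size)	\
	(PML4_PAGE_NUM(EPT_ADDRESS_SPACE(ram_size)) + PDPT_PAGE_NUM(EPT_ADDRESS_SPACE(ram_size)) + \
	 PD_PAGE_NUM(EPT_ADDRESS_SPACE(ram_size)) + PT_PAGE_NUM(EPT_ADDRESS_SPACE(ram_size)))

/* Separate pools per VM class, instead of forcing every VM onto the SOS
 * pool as the old single-element ept_pages_info[] effectively did. */
static struct page sos_ept_pages[VM_EPT_PGTABLE_PAGE_NUM(SOS_RAM_SIZE)];
static struct page post_vm_ept_pages[MAX_POST_VM_NUM][VM_EPT_PGTABLE_PAGE_NUM(UOS_RAM_SIZE)];
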
87 lines · 2.7 KiB · C
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef PAGE_H
#define PAGE_H

#define PAGE_SHIFT	12U
#define PAGE_SIZE	(1U << PAGE_SHIFT)
#define PAGE_MASK	0xFFFFFFFFFFFFF000UL

/* size of the low MMIO address space: 2GB */
#define PLATFORM_LO_MMIO_SIZE	0x80000000UL

/* size of the high MMIO address space: 1GB */
#define PLATFORM_HI_MMIO_SIZE	0x40000000UL

#define PML4_PAGE_NUM(size)	1UL
#define PDPT_PAGE_NUM(size)	(((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
#define PD_PAGE_NUM(size)	(((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
#define PT_PAGE_NUM(size)	(((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)

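/*
 * Added worked example, assuming the usual 4-level paging geometry defined
 * elsewhere in ACRN (PML4E_SIZE = 512GB, PDPTE_SIZE = 1GB, PDE_SIZE = 2MB):
 * mapping a 4GB address space needs PDPT_PAGE_NUM(4GB) = 1,
 * PD_PAGE_NUM(4GB) = 4 and PT_PAGE_NUM(4GB) = 2048 pages, plus the single
 * PML4 page.
 */
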
/*
 * The size of the guest physical address space covered by the EPT page
 * tables of a VM, under the assumptions:
 * - The GPA ranges of DRAM & MMIO are contiguous.
 * - The guest OS won't re-program device MMIO BARs to addresses not covered
 *   by this EPT_ADDRESS_SPACE.
 */
#define EPT_ADDRESS_SPACE(size)	(((size) > MEM_2G) ? \
			((size) + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE) \
			: (MEM_2G + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE))

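/*
 * Added worked example: for a post-launched VM with a hypothetical
 * UOS_RAM_SIZE of 1GB (<= MEM_2G), the covered space is
 * MEM_2G + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE = 2GB + 2GB + 1GB,
 * i.e. the (4G + PLATFORM_HI_MMIO_SIZE) range mentioned in the commit
 * message.  For a 32GB SOS_RAM_SIZE it would be 32GB + 2GB + 1GB = 35GB.
 */
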
#define TRUSTY_PML4_PAGE_NUM(size)	(1UL)
#define TRUSTY_PDPT_PAGE_NUM(size)	(1UL)
#define TRUSTY_PD_PAGE_NUM(size)	(PD_PAGE_NUM(size))
#define TRUSTY_PT_PAGE_NUM(size)	(PT_PAGE_NUM(size))
#define TRUSTY_PGTABLE_PAGE_NUM(size)	\
	(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))

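/*
 * Added example: for a hypothetical 16MB secure-world (Trusty) region,
 * TRUSTY_PGTABLE_PAGE_NUM(16MB) = 1 (PML4) + 1 (PDPT) + 1 (PD) + 8 (PT)
 * = 11 pages, again assuming PDPTE_SIZE = 1GB and PDE_SIZE = 2MB.
 */
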
struct acrn_vm;

struct page {
	uint8_t contents[PAGE_SIZE];
} __aligned(PAGE_SIZE);

union pgtable_pages_info {
	struct {
		struct page *pml4_base;
		struct page *pdpt_base;
		struct page *pd_base;
		struct page *pt_base;
	} ppt;
	struct {
		uint64_t top_address_space;
		struct page *nworld_pml4_base;
		struct page *nworld_pdpt_base;
		struct page *nworld_pd_base;
		struct page *nworld_pt_base;
		struct page *sworld_pgtable_base;
		struct page *sworld_memory_base;
	} ept;
};

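/*
 * Added note: memory_ops bundles the page pools referenced by 'info' with
 * the callbacks used when building and walking page tables, so the same
 * paging code can serve both the hypervisor's own page tables (ppt_mem_ops)
 * and a VM's EPT (set up per VM via init_ept_mem_ops()).
 */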
struct memory_ops {
	union pgtable_pages_info *info;
	bool large_page_enabled;
	uint64_t (*get_default_access_right)(void);
	uint64_t (*pgentry_present)(uint64_t pte);
	struct page *(*get_pml4_page)(const union pgtable_pages_info *info);
	struct page *(*get_pdpt_page)(const union pgtable_pages_info *info, uint64_t gpa);
	struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);
	struct page *(*get_pt_page)(const union pgtable_pages_info *info, uint64_t gpa);
	void *(*get_sworld_memory_base)(const union pgtable_pages_info *info);
	void (*clflush_pagewalk)(const void *p);
	void (*tweak_exe_right)(uint64_t *entry);
	void (*recover_exe_right)(uint64_t *entry);
};

extern const struct memory_ops ppt_mem_ops;
void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id);
void *get_reserve_sworld_memory_base(void);

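/*
 * Illustrative usage (added commentary, not part of the original header);
 * 'vm' and 'gpa' are hypothetical locals:
 *
 *	struct memory_ops ept_ops;
 *
 *	init_ept_mem_ops(&ept_ops, vm->vm_id);
 *	struct page *pml4 = ept_ops.get_pml4_page(ept_ops.info);
 *	struct page *pd = ept_ops.get_pd_page(ept_ops.info, gpa);
 */
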
#endif /* PAGE_H */