mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-08-16 07:15:21 +00:00)
hv: vtd: remove dynamic page allocation for root&ctx table
Preserve pages for each vtd root table and ctx table.

Tracked-On: #861
Signed-off-by: Tw <wei.tan@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit f05bfeb961
parent 1b1338bc04
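In short, the commit trades runtime calls to alloc_paging_struct() for page-aligned storage reserved at build time: one root-table page per DMAR unit and one context-table page per (unit, bus) pair, so the lookup can never fail. Below is a minimal standalone sketch of that pattern, not the hypervisor source: the CONFIG_* values, the use of __attribute__((aligned(...))) in place of the hypervisor's __aligned() macro, and the main() driver are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the hypervisor's configuration constants. */
#define CPU_PAGE_SIZE                4096U
#define CONFIG_MAX_IOMMU_NUM         2U
#define CONFIG_IOMMU_INIT_BUS_LIMIT  16U

/* One page of storage; mirrors the struct cpu_page added to the header below. */
struct cpu_page {
	uint8_t contents[CPU_PAGE_SIZE];
};

struct context_table {
	struct cpu_page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
};

/*
 * Pages reserved at build time: one root table per DMAR unit and one
 * context table per (unit, bus) pair, all page-aligned.
 */
static struct cpu_page root_tables[CONFIG_MAX_IOMMU_NUM]
	__attribute__((aligned(CPU_PAGE_SIZE)));
static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM]
	__attribute__((aligned(CPU_PAGE_SIZE)));

static inline uint8_t *get_root_table(uint32_t dmar_index)
{
	return root_tables[dmar_index].contents;
}

static inline uint8_t *get_ctx_table(uint32_t dmar_index, uint8_t bus_no)
{
	return ctx_tables[dmar_index].buses[bus_no].contents;
}

int main(void)
{
	/* The lookup cannot fail, so no NULL check or ASSERT is needed. */
	printf("root table of unit 0:       %p\n", (void *)get_root_table(0U));
	printf("ctx table of unit 0, bus 3: %p\n", (void *)get_ctx_table(0U, 3U));
	return 0;
}

Because the storage is static, callers that previously had to check alloc_paging_struct() for NULL (and ASSERT on failure) can simply take the address of the preallocated page, which is what the diff below does in dmar_set_root_table() and add_iommu_device().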
@@ -94,6 +94,7 @@ enum dmar_iirg_type {
 
 /* dmar unit runtime data */
 struct dmar_drhd_rt {
+	uint32_t index;
 	struct list_head list;
 	spinlock_t lock;
 
@@ -137,6 +138,25 @@ struct iommu_domain {
 	uint64_t trans_table_ptr;
 };
 
+struct context_table {
+	struct cpu_page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
+};
+
+static struct cpu_page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
+static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
+
+static inline uint8_t*
+get_root_table(uint32_t dmar_index)
+{
+	return root_tables[dmar_index].contents;
+}
+
+static inline uint8_t*
+get_ctx_table(uint32_t dmar_index, uint8_t bus_no)
+{
+	return ctx_tables[dmar_index].buses[bus_no].contents;
+}
+
 static struct list_head dmar_drhd_units;
 static uint32_t dmar_hdrh_unit_count;
 
@@ -164,6 +184,7 @@ static void register_hrhd_units(void)
 	for (i = 0U; i < info->drhd_count; i++) {
 		drhd_rt = calloc(1U, sizeof(struct dmar_drhd_rt));
 		ASSERT(drhd_rt != NULL, "");
+		drhd_rt->index = i;
 		drhd_rt->drhd = &info->drhd_units[i];
 		drhd_rt->dmar_irq = IRQ_INVALID;
 		dmar_register_hrhd(drhd_rt);
@@ -655,7 +676,6 @@ static void dmar_set_root_table(struct dmar_drhd_rt *dmar_uint)
 {
 	uint64_t address;
 	uint32_t status;
-	void *root_table_vaddr = NULL;
 
 	spinlock_obtain(&(dmar_uint->lock));
 
@@ -667,13 +687,7 @@ static void dmar_set_root_table(struct dmar_drhd_rt *dmar_uint)
 	 */
 
 	if (dmar_uint->root_table_addr == 0UL) {
-		root_table_vaddr = alloc_paging_struct();
-
-		if (root_table_vaddr != NULL) {
-			dmar_uint->root_table_addr = hva2hpa(root_table_vaddr);
-		} else {
-			ASSERT(false, "failed to allocate root table!");
-		}
+		dmar_uint->root_table_addr = hva2hpa(get_root_table(dmar_uint->index));
 	}
 
 	/* Currently don't support extended root table */
@@ -997,13 +1011,10 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
 	if (dmar_get_bitslice(root_entry->lower,
 	        ROOT_ENTRY_LOWER_PRESENT_MASK,
 	        ROOT_ENTRY_LOWER_PRESENT_POS) == 0UL) {
-		void *vaddr = alloc_paging_struct();
-
-		if (vaddr != NULL) {
 			/* create context table for the bus if not present */
-			context_table_addr = hva2hpa(vaddr);
+			context_table_addr = hva2hpa(get_ctx_table(dmar_uint->index, bus));
 
-			context_table_addr = context_table_addr >> 12;
+			context_table_addr = context_table_addr >> CPU_PAGE_SHIFT;
 
 			lower = dmar_set_bitslice(lower,
 			        ROOT_ENTRY_LOWER_CTP_MASK,
@@ -1017,17 +1028,13 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
 			root_entry->lower = lower;
 			iommu_flush_cache(dmar_uint, root_entry,
 				sizeof(struct dmar_root_entry));
-		} else {
-			ASSERT(false, "failed to allocate context table!");
-			return 1;
-		}
 	} else {
 		context_table_addr = dmar_get_bitslice(root_entry->lower,
 				ROOT_ENTRY_LOWER_CTP_MASK,
 				ROOT_ENTRY_LOWER_CTP_POS);
 	}
 
-	context_table_addr = context_table_addr << 12;
+	context_table_addr = context_table_addr << CPU_PAGE_SHIFT;
 
 	context_table =
 		(struct dmar_context_entry *)hpa2hva(context_table_addr);
@@ -71,6 +71,10 @@ enum _page_table_level {
 #define PAGE_SIZE_2M	MEM_2M
 #define PAGE_SIZE_1G	MEM_1G
 
+struct cpu_page {
+	uint8_t contents[CPU_PAGE_SIZE];
+};
+
 uint64_t get_paging_pml4(void);
 void *alloc_paging_struct(void);
 void free_paging_struct(void *ptr);