hv: refine for HPAn setting
The current code only supports two HPA regions per VM. This patch extends ACRN to support more than two HPA regions per VM, so that host memory can be used more fully when it is scattered across multiple regions. It uses an array to describe the HPA regions of a VM and changes the ve820 logic to support multiple regions.

This patch depends on the config tool and GPA SSRAM changes.

Tracked-On: #6690
Signed-off-by: Chenli Wei <chenli.wei@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>

parent 6c0e8ff793
commit ed1c638c87
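For illustration only, here is a minimal standalone sketch (not hypervisor code) of the idea behind the patch: a VM's memory is described by an array of HPA regions, and the guest's total memory size is the sum of the region sizes, mirroring the calculate_memory_size() helper the patch adds. All addresses and sizes below are made up.

/*
 * Standalone sketch: describe a VM's memory as an array of HPA regions
 * and derive the total guest memory size from it.
 */
#include <stdint.h>
#include <stdio.h>

struct vm_hpa_regions {
	uint64_t start_hpa;
	uint64_t size_hpa;
};

/* mirrors the calculate_memory_size() helper added by this patch */
static uint64_t calculate_memory_size(const struct vm_hpa_regions *regions, uint64_t num)
{
	uint64_t i;
	uint64_t size = 0;

	for (i = 0; i < num; i++) {
		size += regions[i].size_hpa;
	}
	return size;
}

int main(void)
{
	/* hypothetical: host RAM for this VM is scattered across two regions */
	struct vm_hpa_regions host_regions[] = {
		{ 0x100000000UL, 0x20000000UL },	/* 512 MiB at 4 GiB */
		{ 0x180000000UL, 0x20000000UL },	/* 512 MiB at 6 GiB */
	};
	uint64_t num = sizeof(host_regions) / sizeof(host_regions[0]);

	printf("VM memory size: 0x%llx\n",
	       (unsigned long long)calculate_memory_size(host_regions, num));
	return 0;
}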
@@ -14,8 +14,8 @@
 #include <asm/rtcm.h>
 #include <ptdev.h>
 
-#define ENTRY_HPA1		2U
-#define ENTRY_HPA1_HI		8U
+#define ENTRY_GPA_L		2U
+#define ENTRY_GPA_HI		8U
 
 static struct e820_entry service_vm_e820[E820_MAX_ENTRIES];
 static struct e820_entry pre_vm_e820[PRE_VM_NUM][E820_MAX_ENTRIES];
@@ -156,13 +156,10 @@ void create_service_vm_e820(struct acrn_vm *vm)
 		struct acrn_vm_config *vm_config = get_vm_config(vm_id);
 
 		if (vm_config->load_order == PRE_LAUNCHED_VM) {
-			filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa,
-					vm_config->memory.start_hpa + vm_config->memory.size);
-
-			/* if HPA2 is available, filter it out as well */
-			if (vm_config->memory.size_hpa2 != 0UL) {
-				filter_mem_from_service_vm_e820(vm, vm_config->memory.start_hpa2,
-						vm_config->memory.start_hpa2 + vm_config->memory.size_hpa2);
+			for (i = 0; i < vm_config->memory.region_num; i++) {
+				filter_mem_from_service_vm_e820(vm, vm_config->memory.host_regions[i].start_hpa,
+						vm_config->memory.host_regions[i].start_hpa
+						+ vm_config->memory.host_regions[i].size_hpa);
 			}
 		}
 	}
@@ -219,6 +216,18 @@ static const struct e820_entry pre_ve820_template[E820_MAX_ENTRIES] = {
 	},
 };
 
+static inline uint64_t calculate_memory_size(struct vm_hpa_regions *regions, uint64_t num)
+{
+	uint64_t i;
+	uint64_t size = 0;
+
+	for (i = 0; i < num; i++) {
+		size += regions[i].size_hpa;
+	}
+
+	return size;
+}
+
 /**
  * @pre entry != NULL
  */
@@ -273,44 +282,22 @@ void create_prelaunched_vm_e820(struct acrn_vm *vm)
 {
 	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
 	uint64_t gpa_start = 0x100000000UL;
-	uint64_t hpa1_hi_size, hpa2_lo_size;
+	uint64_t gpa_hi_size;
 	uint64_t lowmem_max_length = MEM_2G - PRE_RTVM_SW_SRAM_MAX_SIZE - GPU_OPREGION_SIZE;
-	uint64_t remaining_hpa2_size = vm_config->memory.size_hpa2;
-	uint32_t entry_idx = ENTRY_HPA1_HI;
+	uint32_t entry_idx = ENTRY_GPA_HI;
+	uint64_t memory_size = calculate_memory_size(vm_config->memory.host_regions, vm_config->memory.region_num);
 
 	vm->e820_entries = pre_vm_e820[vm->vm_id];
 	(void)memcpy_s((void *)vm->e820_entries, E820_MAX_ENTRIES * sizeof(struct e820_entry),
 		(const void *)pre_ve820_template, E820_MAX_ENTRIES * sizeof(struct e820_entry));
 
-	/* sanitize entry for hpa1 */
-	if (vm_config->memory.size > lowmem_max_length) {
-		/* need to split hpa1 and add an entry for hpa1_hi */
-		hpa1_hi_size = vm_config->memory.size - lowmem_max_length;
-		gpa_start = add_ram_entry((vm->e820_entries + entry_idx), gpa_start, hpa1_hi_size);
+	if (memory_size > lowmem_max_length) {
+		gpa_hi_size = memory_size - lowmem_max_length;
+		add_ram_entry((vm->e820_entries + entry_idx), gpa_start, gpa_hi_size);
 		entry_idx++;
 	} else {
 		/* need to revise length of hpa1 entry to its actual size, excluding size of used space */
-		vm->e820_entries[ENTRY_HPA1].length = vm_config->memory.size - MEM_1M - VIRT_ACPI_DATA_LEN - VIRT_ACPI_NVS_LEN;
-		if (remaining_hpa2_size > 0UL) {
-			/* need to set gpa_start for hpa2 */
-			gpa_start = vm->e820_entries[ENTRY_HPA1].baseaddr + vm->e820_entries[ENTRY_HPA1].length;
-			if (remaining_hpa2_size > (lowmem_max_length - vm_config->memory.size)) {
-				/* need to split hpa2 and add an entry for hpa2_lo */
-				hpa2_lo_size = lowmem_max_length - vm_config->memory.size;
-			} else {
-				hpa2_lo_size = remaining_hpa2_size;
-			}
-			(void)add_ram_entry((vm->e820_entries + entry_idx), gpa_start, hpa2_lo_size);
-			gpa_start = MEM_4G;
-			remaining_hpa2_size -= hpa2_lo_size;
-			entry_idx++;
-		}
-	}
-
-	/* check whether need an entry for remaining hpa2 */
-	if (remaining_hpa2_size > 0UL) {
-		(void)add_ram_entry((vm->e820_entries + entry_idx), gpa_start, remaining_hpa2_size);
-		entry_idx++;
+		vm->e820_entries[ENTRY_GPA_L].length = memory_size - MEM_1M - VIRT_ACPI_DATA_LEN - VIRT_ACPI_NVS_LEN;
 	}
 
 	vm->e820_entry_num = entry_idx;
@@ -306,13 +306,20 @@ static inline uint16_t get_configured_bsp_pcpu_id(const struct acrn_vm_config *vm_config)
  */
 static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_vm_config *vm_config)
 {
-	bool is_hpa1 = true;
-	uint64_t base_hpa = vm_config->memory.start_hpa;
-	uint64_t remaining_hpa_size = vm_config->memory.size;
+	uint64_t base_hpa;
+	uint64_t base_gpa;
+	uint64_t remaining_entry_size;
+	uint32_t hpa_index;
+	uint64_t base_size;
 	uint32_t i;
+	struct vm_hpa_regions tmp_vm_hpa;
+	const struct e820_entry *entry;
+
+	hpa_index = 0U;
+	tmp_vm_hpa = vm_config->memory.host_regions[0];
 
 	for (i = 0U; i < vm->e820_entry_num; i++) {
-		const struct e820_entry *entry = &(vm->e820_entries[i]);
+		entry = &(vm->e820_entries[i]);
 
 		if (entry->length == 0UL) {
 			continue;
@@ -327,33 +334,39 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_vm_config *vm_config)
 			}
 		}
 
-		if (remaining_hpa_size >= entry->length) {
-			/* Do EPT mapping for GPAs that are backed by physical memory */
-			if ((entry->type == E820_TYPE_RAM) || (entry->type == E820_TYPE_ACPI_RECLAIM)
-					|| (entry->type == E820_TYPE_ACPI_NVS)) {
-				ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
-					entry->length, EPT_RWX | EPT_WB);
-				base_hpa += entry->length;
-				remaining_hpa_size -= entry->length;
-			}
-
-			/* GPAs under 1MB are always backed by physical memory */
-			if ((entry->type != E820_TYPE_RAM) && (entry->baseaddr < (uint64_t)MEM_1M)) {
-				ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
-					entry->length, EPT_RWX | EPT_UNCACHED);
-				base_hpa += entry->length;
-				remaining_hpa_size -= entry->length;
-			}
-		} else {
-			if (entry->type == E820_TYPE_RAM) {
-				pr_warn("%s: HPA size incorrectly configured in v820\n", __func__);
-			}
+		if ((entry->type == E820_TYPE_RESERVED) && (entry->baseaddr > MEM_1M)) {
+			continue;
 		}
 
-		if ((remaining_hpa_size == 0UL) && (is_hpa1)) {
-			is_hpa1 = false;
-			base_hpa = vm_config->memory.start_hpa2;
-			remaining_hpa_size = vm_config->memory.size_hpa2;
+		base_gpa = entry->baseaddr;
+		remaining_entry_size = entry->length;
+
+		while ((hpa_index < vm_config->memory.region_num) && (remaining_entry_size > 0)) {
+
+			base_hpa = tmp_vm_hpa.start_hpa;
+			base_size = min(remaining_entry_size, tmp_vm_hpa.size_hpa);
+
+			if (tmp_vm_hpa.size_hpa > remaining_entry_size) {
+				/* from low to high */
+				tmp_vm_hpa.start_hpa += base_size;
+				tmp_vm_hpa.size_hpa -= base_size;
+			} else {
+				hpa_index++;
+				if (hpa_index < vm_config->memory.region_num) {
+					tmp_vm_hpa = vm_config->memory.host_regions[hpa_index];
+				}
+			}
+
+			if (entry->type != E820_TYPE_RESERVED) {
+				ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, base_gpa,
+						base_size, EPT_RWX | EPT_WB);
+			} else {
+				/* GPAs under 1MB are always backed by physical memory */
+				ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, base_gpa,
+						base_size, EPT_RWX | EPT_UNCACHED);
+			}
+			remaining_entry_size -= base_size;
+			base_gpa += base_size;
 		}
 	}
 
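The rewritten mapping loop above carves each GPA-contiguous e820 entry out of the host region array, consuming regions from low to high. For illustration, a standalone sketch of just that carving logic: the regions and the e820 entry are made up, the printf stands in for the ept_add_mr() call, and min_u64() stands in for the hypervisor's min().

#include <stdint.h>
#include <stdio.h>

struct vm_hpa_regions {
	uint64_t start_hpa;
	uint64_t size_hpa;
};

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return (a < b) ? a : b;
}

int main(void)
{
	/* hypothetical host regions: 256 MiB + 768 MiB, non-contiguous */
	struct vm_hpa_regions regions[] = {
		{ 0x100000000UL, 0x10000000UL },
		{ 0x180000000UL, 0x30000000UL },
	};
	uint64_t region_num = 2U;
	uint64_t hpa_index = 0U;
	struct vm_hpa_regions tmp = regions[0];

	/* one e820 entry: 512 MiB of guest RAM starting at GPA 1 MiB (illustrative) */
	uint64_t base_gpa = 0x100000UL;
	uint64_t remaining = 0x20000000UL;

	while ((hpa_index < region_num) && (remaining > 0U)) {
		uint64_t size = min_u64(remaining, tmp.size_hpa);

		/* the hypervisor would call ept_add_mr(vm, ..., tmp.start_hpa, base_gpa, size, ...) here */
		printf("map GPA 0x%llx -> HPA 0x%llx, size 0x%llx\n",
		       (unsigned long long)base_gpa,
		       (unsigned long long)tmp.start_hpa,
		       (unsigned long long)size);

		if (tmp.size_hpa > remaining) {
			/* region only partially consumed: advance within it */
			tmp.start_hpa += size;
			tmp.size_hpa -= size;
		} else {
			/* region exhausted: move on to the next one */
			hpa_index++;
			if (hpa_index < region_num) {
				tmp = regions[hpa_index];
			}
		}
		remaining -= size;
		base_gpa += size;
	}
	return 0;
}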
@@ -504,7 +517,9 @@ static void prepare_service_vm_memmap(struct acrn_vm *vm)
 	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
 		vm_config = get_vm_config(vm_id);
 		if (vm_config->load_order == PRE_LAUNCHED_VM) {
-			ept_del_mr(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
+			for (i = 0; i < vm_config->memory.region_num; i++) {
+				ept_del_mr(vm, pml4_page, vm_config->memory.host_regions[i].start_hpa, vm_config->memory.host_regions[i].size_hpa);
+			}
 			/* Remove MMIO/IO bars of pre-launched VM's ptdev */
 			deny_pdevs(vm, vm_config->pci_devs, vm_config->pci_dev_num);
 		}
@@ -70,12 +70,15 @@ enum acrn_vm_severity {
 	SEVERITY_STANDARD_VM = 0x10U,
 };
 
+struct vm_hpa_regions {
+	uint64_t start_hpa;
+	uint64_t size_hpa;
+};
+
 struct acrn_vm_mem_config {
-	uint64_t start_hpa;	/* the start HPA of VM memory configuration, for pre-launched VMs only */
-	uint64_t size;		/* VM memory size configuration */
-	uint64_t start_hpa2;	/* Start of second HPA for non-contiguous allocations in VM memory configuration,
-				   for pre-launched VMs only */
-	uint64_t size_hpa2;	/* Size of second HPA for non-contiguous allocations in VM memory configuration */
+	uint64_t region_num;
+	struct vm_hpa_regions *host_regions;
 };
 
 struct target_vuart {
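The commit message notes a dependency on the config tool: the host_regions array is expected to be generated per VM. A hypothetical generated fragment using the new fields might look like the sketch below, compilable as a plain translation unit. The names vm0_hpa_regions and vm0_memory and all addresses are illustrative; real values come from the board and scenario configuration.

#include <stdint.h>

struct vm_hpa_regions {
	uint64_t start_hpa;
	uint64_t size_hpa;
};

struct acrn_vm_mem_config {
	uint64_t region_num;
	struct vm_hpa_regions *host_regions;
};

/* hypothetical per-VM data the config tool would emit */
static struct vm_hpa_regions vm0_hpa_regions[] = {
	{ .start_hpa = 0x100000000UL, .size_hpa = 0x20000000UL },
	{ .start_hpa = 0x180000000UL, .size_hpa = 0x20000000UL },
};

static struct acrn_vm_mem_config vm0_memory = {
	.region_num = 2U,
	.host_regions = vm0_hpa_regions,
};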