hv: Replace dynamic memory with static for mmio

-- Add MAX_EMULATED_MMIO_REGIONS (default 16) to Kconfig
-- Add an emulated mmio array and a region counter
   to the vm structure
-- Remove the mmio list from the vm structure
-- Remove the unregister_mmio_emulation_handler and
   vioapic_cleanup APIs

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Mingqiang Chi <mingqiang.chi@intel.com> 2018-11-06 10:16:11 +08:00 (committed by lijinxia)
Commit: 9a009bcef2 (parent b5505c43a2)
9 changed files with 42 additions and 98 deletions
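
At its core, the change swaps a heap-backed linked list of MMIO handlers for a fixed per-VM array sized at build time. A minimal before/after sketch of the affected fields (the "before"/"after" labels are illustrative; the real fields live in struct acrn_vm, as the vm.h hunk below shows):

	/* Before: handler nodes came from calloc() and were chained on a list. */
	struct acrn_vm {
		struct list_head mmio_list;	/* nodes allocated/freed at run time */
		/* ... */
	};

	/* After: handlers live inline in the VM; registration claims the next
	 * free slot and bumps the counter, so nothing is ever freed. */
	struct acrn_vm {
		uint16_t emul_mmio_regions;	/* slots in use */
		struct mem_io_node emul_mmio[CONFIG_MAX_EMULATED_MMIO_REGIONS];
		/* ... */
	};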


@@ -322,9 +322,6 @@ I/O bitmaps and register or unregister I/O handlers:
 .. doxygenfunction:: register_mmio_emulation_handler
    :project: Project ACRN
 
-.. doxygenfunction:: unregister_mmio_emulation_handler
-   :project: Project ACRN
-
 I/O Emulation
 =============


@@ -86,6 +86,11 @@ config MAX_PCPU_NUM
 	range 1 8
 	default 8
 
+config MAX_EMULATED_MMIO_REGIONS
+	int "Maximum number of emulated mmio regions"
+	range 0 128
+	default 16
+
 config MAX_IOMMU_NUM
 	int "Maximum number of IOMMU devices"
 	range 1 2
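
The Kconfig symbol surfaces in the source with a CONFIG_ prefix, so the value chosen here is what bounds the emul_mmio array in vm.h further down. A hypothetical build-time guard (not part of this commit) could pin the configured value to the range declared above:

	/* Hypothetical guard, not part of this commit: keep the configured
	 * bound within the 0..128 range the Kconfig entry permits. */
	#if CONFIG_MAX_EMULATED_MMIO_REGIONS > 128
	#error "MAX_EMULATED_MMIO_REGIONS exceeds the supported 0..128 range"
	#endif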


@@ -80,9 +80,8 @@ int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
 	/* Map Virtual Machine to its VM Description */
 	vm->vm_desc = vm_desc;
 #endif
-	/* Init mmio list */
-	INIT_LIST_HEAD(&vm->mmio_list);
 
 	vm->hw.created_vcpus = 0U;
+	vm->emul_mmio_regions = 0U;
 
 	/* gpa_lowtop are used for system start up */
 	vm->hw.gpa_lowtop = 0UL;
@@ -173,8 +172,6 @@ int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
 err:
-	vioapic_cleanup(vm_ioapic(vm));
-
 	if (vm->arch_vm.nworld_eptp != NULL) {
 		(void)memset(vm->arch_vm.nworld_eptp, 0U, CPU_PAGE_SIZE);
 	}
@@ -205,9 +202,6 @@ int shutdown_vm(struct acrn_vm *vm)
 	ptdev_release_all_entries(vm);
 
-	/* cleanup vioapic */
-	vioapic_cleanup(vm_ioapic(vm));
-
 	/* Free EPT allocated resources assigned to VM */
 	destroy_ept(vm);


@@ -253,33 +253,34 @@ static int32_t
 hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
 	int status = -ENODEV;
+	uint16_t idx;
 	uint64_t address, size;
-	struct list_head *pos;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 	struct mem_io_node *mmio_handler = NULL;
 
 	address = mmio_req->address;
 	size = mmio_req->size;
 
-	list_for_each(pos, &vcpu->vm->mmio_list) {
+	for (idx = 0U; idx < vcpu->vm->emul_mmio_regions; idx++) {
 		uint64_t base, end;
 
-		mmio_handler = list_entry(pos, struct mem_io_node, list);
+		mmio_handler = &(vcpu->vm->emul_mmio[idx]);
 		base = mmio_handler->range_start;
 		end = mmio_handler->range_end;
 
 		if ((address + size <= base) || (address >= end)) {
 			continue;
 		} else if (!((address >= base) && (address + size <= end))) {
-			pr_fatal("Err MMIO, address:0x%llx, size:%x",
-				address, size);
+			pr_fatal("Err MMIO, address:0x%llx, size:%x", address, size);
 			return -EIO;
 		} else {
 			/* Handle this MMIO operation */
+			if (mmio_handler->read_write) {
 				status = mmio_handler->read_write(io_req, mmio_handler->handler_private_data);
 				break;
+			}
 		}
 	}
 
 	return status;
 }
@@ -499,64 +500,35 @@ int register_mmio_emulation_handler(struct acrn_vm *vm,
 	/* Ensure both a read/write handler and range check function exist */
 	if ((read_write != NULL) && (end > start)) {
-		/* Allocate memory for node */
-		mmio_node =
-		(struct mem_io_node *)calloc(1U, sizeof(struct mem_io_node));
-
-		/* Ensure memory successfully allocated */
-		if (mmio_node != NULL) {
+		if (vm->emul_mmio_regions >= CONFIG_MAX_EMULATED_MMIO_REGIONS) {
+			pr_err("the emulated mmio region is out of range");
+			return status;
+		}
+		mmio_node = &(vm->emul_mmio[vm->emul_mmio_regions]);
 		/* Fill in information for this node */
 		mmio_node->read_write = read_write;
 		mmio_node->handler_private_data = handler_private_data;
-		INIT_LIST_HEAD(&mmio_node->list);
-		list_add(&mmio_node->list, &vm->mmio_list);
 		mmio_node->range_start = start;
 		mmio_node->range_end = end;
+		(vm->emul_mmio_regions)++;
 
 		/*
 		 * SOS would map all its memory at beginning, so we
 		 * should unmap it. But UOS will not, so we shouldn't
 		 * need to unmap it.
 		 */
		if (is_vm0(vm)) {
-			ept_mr_del(vm,
-				(uint64_t *)vm->arch_vm.nworld_eptp,
-				start, end - start);
+			ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+				start, end - start);
 		}
 
 		/* Return success */
 		status = 0;
-		}
 	}
 
 	/* Return status to caller */
 	return status;
 }
-
-/**
- * @brief Unregister a MMIO handler
- *
- * @param vm The VM from which MMIO handlers are unregistered
- * @param start The base address of the range the to-be-unregistered handler is for
- * @param end The end of the range (exclusive) the to-be-unregistered handler is for
- */
-void unregister_mmio_emulation_handler(struct acrn_vm *vm, uint64_t start,
-	uint64_t end)
-{
-	struct list_head *pos, *tmp;
-	struct mem_io_node *mmio_node;
-
-	list_for_each_safe(pos, tmp, &vm->mmio_list) {
-		mmio_node = list_entry(pos, struct mem_io_node, list);
-		if ((mmio_node->range_start == start) &&
-			(mmio_node->range_end == end)) {
-			/* assume only one entry found in mmio_list */
-			list_del_init(&mmio_node->list);
-			free(mmio_node);
-			break;
-		}
-	}
-}
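
With the list gone, registration is append-only and fails once every slot is used; since the unregister path is removed, a handler is expected to live for the VM's lifetime. A hedged sketch of a caller (my_mmio_access and my_device_init are hypothetical; the handler signature is inferred from how read_write is invoked above, and VIOAPIC_BASE/VIOAPIC_SIZE are the constants the removed vioapic_cleanup referenced):

	/* Illustrative only: a device init path registering one MMIO region. */
	static int my_mmio_access(struct io_request *io_req, void *priv)
	{
		/* decode io_req->reqs.mmio and emulate the access */
		return 0;
	}

	static void my_device_init(struct acrn_vm *vm)
	{
		/* Claims the next slot in vm->emul_mmio. Once emul_mmio_regions
		 * reaches CONFIG_MAX_EMULATED_MMIO_REGIONS, the call logs an
		 * error and returns the default failure status instead of
		 * allocating, so callers should still check the result. */
		if (register_mmio_emulation_handler(vm, my_mmio_access,
				(uint64_t)VIOAPIC_BASE,
				(uint64_t)VIOAPIC_BASE + VIOAPIC_SIZE, NULL) != 0) {
			pr_err("mmio handler registration failed");
		}
	}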


@@ -523,14 +523,6 @@ vioapic_init(struct acrn_vm *vm)
 		vm);
 }
 
-void
-vioapic_cleanup(const struct acrn_vioapic *vioapic)
-{
-	unregister_mmio_emulation_handler(vioapic->vm,
-		(uint64_t)VIOAPIC_BASE,
-		(uint64_t)VIOAPIC_BASE + VIOAPIC_SIZE);
-}
-
 uint32_t
 vioapic_pincount(const struct acrn_vm *vm)
 {


@@ -387,11 +387,7 @@ static int vmsix_init(struct pci_vdev *vdev)
 static int vmsix_deinit(struct pci_vdev *vdev)
 {
-	if (vdev->msix.intercepted_size != 0UL) {
-		unregister_mmio_emulation_handler(vdev->vpci->vm, vdev->msix.intercepted_gpa,
-			vdev->msix.intercepted_gpa + vdev->msix.intercepted_size);
-		vdev->msix.intercepted_size = 0U;
-	}
+	vdev->msix.intercepted_size = 0U;
 
 	if (vdev->msix.table_count != 0U) {
 		ptdev_remove_msix_remapping(vdev->vpci->vm, vdev->vbdf.value, vdev->msix.table_count);


@@ -56,7 +56,6 @@ struct acrn_vioapic {
 };
 
 void vioapic_init(struct acrn_vm *vm);
-void vioapic_cleanup(const struct acrn_vioapic *vioapic);
 void vioapic_reset(struct acrn_vioapic *vioapic);


@@ -134,9 +134,8 @@ struct acrn_vm {
 	struct iommu_domain *iommu;	/* iommu domain of this VM */
 	spinlock_t spinlock;	/* Spin-lock used to protect VM modifications */
 
-	struct list_head mmio_list; /* list for mmio. This list is not updated
-				     * when vm is active. So no lock needed
-				     */
+	uint16_t emul_mmio_regions; /* Number of emulated mmio regions */
+	struct mem_io_node emul_mmio[CONFIG_MAX_EMULATED_MMIO_REGIONS];
 
 	unsigned char GUID[16];
 	struct secure_world_control sworld_control;


@@ -227,16 +227,6 @@ int register_mmio_emulation_handler(struct acrn_vm *vm,
 	hv_mem_io_handler_t read_write, uint64_t start,
 	uint64_t end, void *handler_private_data);
 
-/**
- * @brief Unregister a MMIO handler
- *
- * @param vm The VM from which MMIO handlers are unregistered
- * @param start The base address of the range the to-be-unregistered handler is for
- * @param end The end of the range (exclusive) the to-be-unregistered handler is for
- */
-void unregister_mmio_emulation_handler(struct acrn_vm *vm, uint64_t start,
-	uint64_t end);
-
 /**
  * @brief General post-work for MMIO emulation
  *