mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-08-04 01:41:08 +00:00
HV: io: move MMIO handler registration to io.c
This patch solely moves the MMIO handler registration APIs from ept.c to io.c, as they are more related to I/O request handling. Signed-off-by: Junjie Mao <junjie.mao@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
b21b172347
commit
26ab2c9146
@ -189,75 +189,6 @@ bool is_ept_supported(void)
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
int register_mmio_emulation_handler(struct vm *vm,
|
|
||||||
hv_mem_io_handler_t read_write, uint64_t start,
|
|
||||||
uint64_t end, void *handler_private_data)
|
|
||||||
{
|
|
||||||
int status = -EINVAL;
|
|
||||||
struct mem_io_node *mmio_node;
|
|
||||||
|
|
||||||
if (vm->hw.created_vcpus > 0 && vm->hw.vcpu_array[0]->launched) {
|
|
||||||
ASSERT(false, "register mmio handler after vm launched");
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Ensure both a read/write handler and range check function exist */
|
|
||||||
if ((read_write != NULL) && (end > start)) {
|
|
||||||
/* Allocate memory for node */
|
|
||||||
mmio_node =
|
|
||||||
(struct mem_io_node *)calloc(1U, sizeof(struct mem_io_node));
|
|
||||||
|
|
||||||
/* Ensure memory successfully allocated */
|
|
||||||
if (mmio_node != NULL) {
|
|
||||||
/* Fill in information for this node */
|
|
||||||
mmio_node->read_write = read_write;
|
|
||||||
mmio_node->handler_private_data = handler_private_data;
|
|
||||||
|
|
||||||
INIT_LIST_HEAD(&mmio_node->list);
|
|
||||||
list_add(&mmio_node->list, &vm->mmio_list);
|
|
||||||
|
|
||||||
mmio_node->range_start = start;
|
|
||||||
mmio_node->range_end = end;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SOS would map all its memory at beginning, so we
|
|
||||||
* should unmap it. But UOS will not, so we shouldn't
|
|
||||||
* need to unmap it.
|
|
||||||
*/
|
|
||||||
if (is_vm0(vm)) {
|
|
||||||
ept_mr_del(vm,
|
|
||||||
(uint64_t *)vm->arch_vm.nworld_eptp,
|
|
||||||
start, end - start);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return success */
|
|
||||||
status = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return status to caller */
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Remove a previously registered MMIO emulation handler.
 *
 * Looks up the node whose [range_start, range_end) exactly matches
 * (start, end), unlinks it from the VM's mmio_list and frees it.
 * At most one node is removed per call.
 *
 * @param vm    VM whose handler list is searched.
 * @param start Range start the handler was registered with.
 * @param end   Range end the handler was registered with.
 */
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
	uint64_t end)
{
	struct list_head *cur, *next;

	/* Safe iteration: the matching node is unlinked and freed. */
	list_for_each_safe(cur, next, &vm->mmio_list) {
		struct mem_io_node *node =
			list_entry(cur, struct mem_io_node, list);

		if ((node->range_start != start) ||
			(node->range_end != end)) {
			continue;
		}

		/* assume only one entry found in mmio_list */
		list_del_init(&node->list);
		free(node);
		break;
	}
}
|
|
||||||
|
|
||||||
int ept_violation_vmexit_handler(struct vcpu *vcpu)
|
int ept_violation_vmexit_handler(struct vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int status = -EINVAL, ret;
|
int status = -EINVAL, ret;
|
||||||
|
@ -435,3 +435,72 @@ void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range,
|
|||||||
|
|
||||||
register_io_handler(vm, handler);
|
register_io_handler(vm, handler);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int register_mmio_emulation_handler(struct vm *vm,
|
||||||
|
hv_mem_io_handler_t read_write, uint64_t start,
|
||||||
|
uint64_t end, void *handler_private_data)
|
||||||
|
{
|
||||||
|
int status = -EINVAL;
|
||||||
|
struct mem_io_node *mmio_node;
|
||||||
|
|
||||||
|
if (vm->hw.created_vcpus > 0 && vm->hw.vcpu_array[0]->launched) {
|
||||||
|
ASSERT(false, "register mmio handler after vm launched");
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Ensure both a read/write handler and range check function exist */
|
||||||
|
if ((read_write != NULL) && (end > start)) {
|
||||||
|
/* Allocate memory for node */
|
||||||
|
mmio_node =
|
||||||
|
(struct mem_io_node *)calloc(1U, sizeof(struct mem_io_node));
|
||||||
|
|
||||||
|
/* Ensure memory successfully allocated */
|
||||||
|
if (mmio_node != NULL) {
|
||||||
|
/* Fill in information for this node */
|
||||||
|
mmio_node->read_write = read_write;
|
||||||
|
mmio_node->handler_private_data = handler_private_data;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&mmio_node->list);
|
||||||
|
list_add(&mmio_node->list, &vm->mmio_list);
|
||||||
|
|
||||||
|
mmio_node->range_start = start;
|
||||||
|
mmio_node->range_end = end;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SOS would map all its memory at beginning, so we
|
||||||
|
* should unmap it. But UOS will not, so we shouldn't
|
||||||
|
* need to unmap it.
|
||||||
|
*/
|
||||||
|
if (is_vm0(vm)) {
|
||||||
|
ept_mr_del(vm,
|
||||||
|
(uint64_t *)vm->arch_vm.nworld_eptp,
|
||||||
|
start, end - start);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return success */
|
||||||
|
status = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return status to caller */
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Unregister the MMIO emulation handler that was registered for exactly
 * the range (start, end) on the given VM, freeing its list node.
 *
 * @param vm    VM whose mmio_list is searched.
 * @param start Registered range start (must match exactly).
 * @param end   Registered range end (must match exactly).
 */
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
	uint64_t end)
{
	struct mem_io_node *entry;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &vm->mmio_list) {
		entry = list_entry(p, struct mem_io_node, list);
		if ((entry->range_start == start) &&
				(entry->range_end == end)) {
			list_del_init(&entry->list);
			free(entry);
			break;	/* assume the range is registered once */
		}
	}
}
|
||||||
|
Loading…
Reference in New Issue
Block a user