hv: dm: Use new I/O request data structures
struct vhm_request              -> struct acrn_io_request
union vhm_request_buffer        -> struct acrn_io_request_buffer
struct pio_request              -> struct acrn_pio_request
struct mmio_request             -> struct acrn_mmio_request
struct ioreq_notify             -> struct acrn_ioreq_notify

VHM_REQ_PIO_INVAL               -> IOREQ_PIO_INVAL
VHM_REQ_MMIO_INVAL              -> IOREQ_MMIO_INVAL
REQ_PORTIO                      -> ACRN_IOREQ_TYPE_PORTIO
REQ_MMIO                        -> ACRN_IOREQ_TYPE_MMIO
REQ_PCICFG                      -> ACRN_IOREQ_TYPE_PCICFG
REQ_WP                          -> ACRN_IOREQ_TYPE_WP

REQUEST_READ                    -> ACRN_IOREQ_DIR_READ
REQUEST_WRITE                   -> ACRN_IOREQ_DIR_WRITE
REQ_STATE_PROCESSING            -> ACRN_IOREQ_STATE_PROCESSING
REQ_STATE_PENDING               -> ACRN_IOREQ_STATE_PENDING
REQ_STATE_COMPLETE              -> ACRN_IOREQ_STATE_COMPLETE
REQ_STATE_FREE                  -> ACRN_IOREQ_STATE_FREE

IC_CREATE_IOREQ_CLIENT          -> ACRN_IOCTL_CREATE_IOREQ_CLIENT
IC_DESTROY_IOREQ_CLIENT         -> ACRN_IOCTL_DESTROY_IOREQ_CLIENT
IC_ATTACH_IOREQ_CLIENT          -> ACRN_IOCTL_ATTACH_IOREQ_CLIENT
IC_NOTIFY_REQUEST_FINISH        -> ACRN_IOCTL_NOTIFY_REQUEST_FINISH
IC_CLEAR_VM_IOREQ               -> ACRN_IOCTL_CLEAR_VM_IOREQ
HYPERVISOR_CALLBACK_VHM_VECTOR  -> HYPERVISOR_CALLBACK_HSM_VECTOR

arch_fire_vhm_interrupt()       -> arch_fire_hsm_interrupt()
get_vhm_notification_vector()   -> get_hsm_notification_vector()
set_vhm_notification_vector()   -> set_hsm_notification_vector()
acrn_vhm_notification_vector    -> acrn_hsm_notification_vector
get_vhm_req_state()             -> get_io_req_state()
set_vhm_req_state()             -> set_io_req_state()

The structures below differ slightly from their former versions:
struct acrn_ioreq_notify
struct acrn_io_request

Tracked-On: #6282
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
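To make the direction of the renames concrete, here is a minimal sketch of how a device-model request handler reads when written against the new identifiers. The constant and structure names come from the table above; the field layout, enum values, and the ACRN_IO_REQUEST_MAX value are assumptions for illustration only, not the actual definitions in the ACRN public headers.

/*
 * Illustrative sketch only: a simplified request layout and dispatch loop
 * using the renamed identifiers. Field names, enum values and the buffer
 * size are assumptions, not copied from the ACRN headers.
 */
#include <stdint.h>

#define ACRN_IO_REQUEST_MAX	16U	/* slots per shared page; value assumed */

enum acrn_ioreq_type  { ACRN_IOREQ_TYPE_PORTIO, ACRN_IOREQ_TYPE_MMIO,
			ACRN_IOREQ_TYPE_PCICFG, ACRN_IOREQ_TYPE_WP };
enum acrn_ioreq_dir   { ACRN_IOREQ_DIR_READ, ACRN_IOREQ_DIR_WRITE };
enum acrn_ioreq_state { ACRN_IOREQ_STATE_FREE, ACRN_IOREQ_STATE_PENDING,
			ACRN_IOREQ_STATE_PROCESSING, ACRN_IOREQ_STATE_COMPLETE };

struct acrn_pio_request  { uint32_t direction; uint64_t address; uint64_t size; uint32_t value; };
struct acrn_mmio_request { uint32_t direction; uint64_t address; uint64_t size; uint64_t value; };

struct acrn_io_request {		/* simplified stand-in, not the real layout */
	uint32_t type;			/* ACRN_IOREQ_TYPE_* */
	uint32_t processed;		/* ACRN_IOREQ_STATE_* */
	union {
		struct acrn_pio_request  pio_request;
		struct acrn_mmio_request mmio_request;
	} reqs;
};

/* Walk the shared buffer and complete every request handed to this client. */
static void handle_io_requests(struct acrn_io_request *req_buf)
{
	for (uint32_t i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
		struct acrn_io_request *req = &req_buf[i];

		if (req->processed != ACRN_IOREQ_STATE_PROCESSING)
			continue;

		if ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		    (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ))
			req->reqs.pio_request.value = ~0U;	/* emulate "no device present" */

		req->processed = ACRN_IOREQ_STATE_COMPLETE;
	}
}

Roughly speaking, such a loop would run in an ioreq client that the device model registers and attaches through ACRN_IOCTL_CREATE_IOREQ_CLIENT and ACRN_IOCTL_ATTACH_IOREQ_CLIENT, with completions reported back via ACRN_IOCTL_NOTIFY_REQUEST_FINISH.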
@@ -527,8 +527,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __un
  *
  * @param vcpu Pointer to vCPU that initiates the hypercall
  * @param target_vm Pointer to target VM data structure
- * @param param2 guest physical address. This gpa points to
- *              struct acrn_set_ioreq_buffer
+ * @param param2 guest physical address. This gpa points to buffer address
  *
  * @pre is_sos_vm(vcpu->vm)
  * @return 0 on success, non-zero on error.
@@ -542,21 +541,21 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
 	int32_t ret = -1;

 	if (is_created_vm(target_vm)) {
-		struct acrn_set_ioreq_buffer iobuf;
+		uint64_t iobuf;

 		if (copy_from_gpa(vm, &iobuf, param2, sizeof(iobuf)) == 0) {
 			dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
-					target_vm->vm_id, iobuf.req_buf);
+					target_vm->vm_id, iobuf);

-			hpa = gpa2hpa(vm, iobuf.req_buf);
+			hpa = gpa2hpa(vm, iobuf);
 			if (hpa == INVALID_HPA) {
 				pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
-					__func__, vm->vm_id, iobuf.req_buf);
+					__func__, vm->vm_id, iobuf);
 				target_vm->sw.io_shared_page = NULL;
 			} else {
 				target_vm->sw.io_shared_page = hpa2hva(hpa);
-				for (i = 0U; i < VHM_REQUEST_MAX; i++) {
-					set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+				for (i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
+					set_io_req_state(target_vm, i, ACRN_IOREQ_STATE_FREE);
 				}
 				ret = 0;
 			}
@@ -1232,7 +1231,7 @@ int32_t hcall_set_callback_vector(__unused struct acrn_vcpu *vcpu, __unused stru
 		pr_err("%s: Invalid passed vector\n", __func__);
 		ret = -EINVAL;
 	} else {
-		set_vhm_notification_vector((uint32_t)param1);
+		set_hsm_notification_vector((uint32_t)param1);
 		ret = 0;
 	}
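A side note on the hcall_set_ioreq_buffer hunk above: param2 used to point at a struct acrn_set_ioreq_buffer whose req_buf member held the GPA of the shared I/O request page; it now points at a bare uint64_t GPA. Below is a rough caller-side sketch of what the service VM would place at param2, assuming the old wrapper carried only that single field; the helper names are hypothetical, and only the struct name, the req_buf field, and the new bare-GPA convention come from the diff.

/*
 * Illustrative only: the memory referenced by param2 before and after this
 * change. Helper names are invented for the sketch.
 */
#include <stdint.h>

struct acrn_set_ioreq_buffer {		/* old wrapper, removed by this change */
	uint64_t req_buf;		/* GPA of the shared I/O request page */
};

/* Before: param2 -> struct acrn_set_ioreq_buffer { .req_buf = buffer GPA } */
static void fill_param2_old(void *param2_va, uint64_t buffer_gpa)
{
	struct acrn_set_ioreq_buffer *wrap = param2_va;

	wrap->req_buf = buffer_gpa;
}

/* After: param2 -> a bare uint64_t holding the buffer GPA directly */
static void fill_param2_new(void *param2_va, uint64_t buffer_gpa)
{
	uint64_t *slot = param2_va;

	*slot = buffer_gpa;
}

If the old wrapper really held only that one 64-bit field, the bytes at param2 are identical either way; the hypervisor-side change simply drops the intermediate type and copies the GPA directly, as the hunk shows.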