From 88f74b5dbb03f62347a7f947e077dc3dbb476337 Mon Sep 17 00:00:00 2001
From: Junjie Mao
Date: Tue, 24 Jul 2018 19:05:47 +0800
Subject: [PATCH] HV: io: unify vhm_request req and mem_io in vcpu

The current struct vcpu has two members, namely 'struct vhm_request req'
and 'struct mem_io mmio', that hold similar information, including the
address, direction, size, value and status of MMIO requests.

As a step towards a unified framework for both MMIO and PIO, this patch
merges these two members into a tailored version of vhm_request, mostly
by dropping the reserved fields. The definitions of request types,
directions and processing states are reused.

Error handling during emulation will be revisited after the I/O
emulation paths are unified. Thus, for this patch, mmio.mmio_status is
inherited by io_req.processed, which is not yet properly handled.

Signed-off-by: Junjie Mao
Acked-by: Eddie Dong
---
 hypervisor/arch/x86/ept.c                   | 94 +++++++++----------
 .../arch/x86/guest/instr_emul_wrapper.c     |  8 +-
 hypervisor/arch/x86/guest/vcpu.c            |  2 +
 hypervisor/arch/x86/guest/vioapic.c         | 21 ++---
 hypervisor/arch/x86/guest/vlapic.c          | 35 ++++---
 hypervisor/arch/x86/io.c                    | 94 ++++++++++---------
 hypervisor/common/io_request.c              | 48 +++++-----
 hypervisor/include/arch/x86/guest/vcpu.h    |  3 +-
 hypervisor/include/arch/x86/guest/vioapic.h |  2 +-
 hypervisor/include/arch/x86/guest/vlapic.h  |  2 +-
 hypervisor/include/arch/x86/ioreq.h         | 38 ++++----
 hypervisor/include/public/acrn_common.h     | 14 +--
 12 files changed, 180 insertions(+), 181 deletions(-)

diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index 3c46dda6e..d0ccc78ed 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -190,16 +190,19 @@ bool is_ept_supported(void)
 	return status;
 }
 
-static int hv_emulate_mmio(struct vcpu *vcpu, struct mem_io *mmio,
-		struct mem_io_node *mmio_handler)
+static int
+hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req,
+		struct mem_io_node *mmio_handler)
 {
-	if ((mmio->paddr % mmio->access_size) != 0) {
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+
+	if ((mmio_req->address % mmio_req->size) != 0UL) {
 		pr_err("access size not align with paddr");
 		return -EINVAL;
 	}
 
 	/* Handle this MMIO operation */
-	return mmio_handler->read_write(vcpu, mmio,
+	return mmio_handler->read_write(vcpu, io_req,
 			mmio_handler->handler_private_data);
 }
 
@@ -276,26 +279,25 @@ int dm_emulate_mmio_post(struct vcpu *vcpu)
 {
 	int ret = 0;
 	uint16_t cur = vcpu->vcpu_id;
+	struct io_request *io_req = &vcpu->req;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 	union vhm_request_buffer *req_buf;
+	struct vhm_request *vhm_req;
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+	vhm_req = &req_buf->req_queue[cur];
 
-	vcpu->req.reqs.mmio_request.value =
-		req_buf->req_queue[cur].reqs.mmio_request.value;
+	mmio_req->value = vhm_req->reqs.mmio.value;
+	io_req->processed = vhm_req->processed;
 
 	/* VHM emulation data already copy to req, mark to free slot now */
-	req_buf->req_queue[cur].valid = false;
+	vhm_req->valid = 0;
 
-	if (req_buf->req_queue[cur].processed == REQ_STATE_SUCCESS) {
-		vcpu->mmio.mmio_status = MMIO_TRANS_VALID;
-	}
-	else {
-		vcpu->mmio.mmio_status = MMIO_TRANS_INVALID;
+	if (io_req->processed != REQ_STATE_SUCCESS) {
 		goto out;
 	}
 
-	if (vcpu->mmio.read_write == HV_MEM_IO_READ) {
-		vcpu->mmio.value = vcpu->req.reqs.mmio_request.value;
+	if (mmio_req->direction == REQUEST_READ) {
 		/* Emulate instruction and update vcpu register set */
 		ret = emulate_instruction(vcpu);
 		if (ret != 0) {
@@ -307,29 +309,24 @@
 out:
 	return ret;
 }
 
-static int dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual)
+static int
+dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual __unused)
 {
 	int status;
+	struct io_request *io_req = &vcpu->req;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
-	if (vcpu->mmio.read_write == HV_MEM_IO_WRITE) {
+	if (mmio_req->direction == REQUEST_WRITE) {
 		status = emulate_instruction(vcpu);
 		if (status != 0) {
 			return status;
 		}
-		vcpu->req.reqs.mmio_request.value = vcpu->mmio.value;
 
 		/* XXX: write access while EPT perm RX -> WP */
 		if ((exit_qual & 0x38UL) == 0x28UL) {
-			vcpu->req.type = REQ_WP;
+			io_req->type = REQ_WP;
 		}
 	}
 
-	if (vcpu->req.type == 0U) {
-		vcpu->req.type = REQ_MMIO;
-	}
-	vcpu->req.reqs.mmio_request.direction = vcpu->mmio.read_write;
-	vcpu->req.reqs.mmio_request.address = (long)vcpu->mmio.paddr;
-	vcpu->req.reqs.mmio_request.size = vcpu->mmio.access_size;
-
 	return 0;
 }
 
@@ -339,31 +336,28 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 	uint64_t exit_qual;
 	uint64_t gpa;
 	struct list_head *pos;
-	struct mem_io *mmio = &vcpu->mmio;
+	struct io_request *io_req = &vcpu->req;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 	struct mem_io_node *mmio_handler = NULL;
 
+	io_req->type = REQ_MMIO;
+	io_req->processed = REQ_STATE_PENDING;
+
 	/* Handle page fault from guest */
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
 	/* Specify if read or write operation */
 	if ((exit_qual & 0x2UL) != 0UL) {
 		/* Write operation */
-		mmio->read_write = HV_MEM_IO_WRITE;
-
-		/* Get write value from appropriate register in context */
-		/* TODO: Need to figure out how to determine value being
-		 * written
-		 */
-		mmio->value = 0UL;
+		mmio_req->direction = REQUEST_WRITE;
+		mmio_req->value = 0UL;
 	} else {
 		/* Read operation */
-		mmio->read_write = HV_MEM_IO_READ;
+		mmio_req->direction = REQUEST_READ;
 
-		/* Get sign extension requirements for read */
 		/* TODO: Need to determine how sign extension is determined for
 		 * reads
 		 */
-		mmio->sign_extend_read = 0U;
 	}
 
 	/* Get the guest physical address */
@@ -373,37 +367,35 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 	/* Adjust IPA appropriately and OR page offset to get full IPA
	 * of abort
	 */
-	mmio->paddr = gpa;
+	mmio_req->address = gpa;
 
 	ret = decode_instruction(vcpu);
 	if (ret > 0) {
-		mmio->access_size = ret;
-	}
-	else if (ret == -EFAULT) {
+		mmio_req->size = (uint64_t)ret;
+	} else if (ret == -EFAULT) {
 		pr_info("page fault happen during decode_instruction");
 		status = 0;
 		goto out;
-	}
-	else {
+	} else {
 		goto out;
 	}
 
 	list_for_each(pos, &vcpu->vm->mmio_list) {
 		mmio_handler = list_entry(pos, struct mem_io_node, list);
-		if (((mmio->paddr + mmio->access_size) <=
+		if (((mmio_req->address + mmio_req->size) <=
 				mmio_handler->range_start) ||
-			(mmio->paddr >= mmio_handler->range_end)) {
+			(mmio_req->address >= mmio_handler->range_end)) {
 			continue;
 		}
-		else if (!((mmio->paddr >= mmio_handler->range_start) &&
-			((mmio->paddr + mmio->access_size) <=
+		else if (!((mmio_req->address >= mmio_handler->range_start) &&
+			((mmio_req->address + mmio_req->size) <=
 				mmio_handler->range_end))) {
 			pr_fatal("Err MMIO, addr:0x%llx, size:%x",
-					mmio->paddr, mmio->access_size);
+					mmio_req->address, mmio_req->size);
 			return -EIO;
 		}
 
-		if (mmio->read_write == HV_MEM_IO_WRITE) {
+		if (mmio_req->direction == REQUEST_WRITE) {
 			if (emulate_instruction(vcpu) != 0) {
 				goto out;
 			}
@@ -414,8 +406,8 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 		 * instruction emulation. For MMIO read,
 		 * call hv_emulate_mmio at first.
 		 */
-		hv_emulate_mmio(vcpu, mmio, mmio_handler);
-		if (mmio->read_write == HV_MEM_IO_READ) {
+		hv_emulate_mmio(vcpu, io_req, mmio_handler);
+		if (mmio_req->direction == REQUEST_READ) {
 			/* Emulate instruction and update vcpu register set */
 			if (emulate_instruction(vcpu) != 0) {
 				goto out;
 			}
@@ -435,8 +427,6 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 	 * instruction emulation. For MMIO read, ask DM to run MMIO
 	 * emulation at first.
 	 */
-	(void)memset(&vcpu->req, 0, sizeof(struct vhm_request));
-
 	if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0) {
 		goto out;
 	}
diff --git a/hypervisor/arch/x86/guest/instr_emul_wrapper.c b/hypervisor/arch/x86/guest/instr_emul_wrapper.c
index 0c7495a8a..2e5aa26cb 100644
--- a/hypervisor/arch/x86/guest/instr_emul_wrapper.c
+++ b/hypervisor/arch/x86/guest/instr_emul_wrapper.c
@@ -243,7 +243,7 @@ encode_vmcs_seg_desc(enum cpu_reg_name seg,
  *
  *Post Condition:
  *In the non-general register names group (CPU_REG_CR0~CPU_REG_GDTR),
- *for register names CPU_REG_CR2, CPU_REG_IDTR and CPU_REG_GDTR, 
+ *for register names CPU_REG_CR2, CPU_REG_IDTR and CPU_REG_GDTR,
  *this function returns VMX_INVALID_VMCS_FIELD;
  *for other register names, it returns correspoding field index MACROs
  *in VMCS.
@@ -319,7 +319,7 @@ static int mmio_read(struct vcpu *vcpu, __unused uint64_t gpa, uint64_t *rval,
 		return -EINVAL;
 	}
 
-	*rval = vcpu->mmio.value;
+	*rval = vcpu->req.reqs.mmio.value;
 	return 0;
 }
 
@@ -330,7 +330,7 @@ static int mmio_write(struct vcpu *vcpu, __unused uint64_t gpa, uint64_t wval,
 		return -EINVAL;
 	}
 
-	vcpu->mmio.value = wval;
+	vcpu->req.reqs.mmio.value = wval;
 	return 0;
 }
 
@@ -375,7 +375,7 @@ int emulate_instruction(struct vcpu *vcpu)
 	struct emul_ctxt *emul_ctxt;
 	struct vm_guest_paging *paging;
 	int retval = 0;
-	uint64_t gpa = vcpu->mmio.paddr;
+	uint64_t gpa = vcpu->req.reqs.mmio.address;
 	mem_region_read_t mread = mmio_read;
 	mem_region_write_t mwrite = mmio_write;
diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 617a28a47..c5e2e8583 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -122,6 +122,8 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;
 
+	(void)memset(&vcpu->req, 0U, sizeof(struct io_request));
+
 	return 0;
 }
diff --git a/hypervisor/arch/x86/guest/vioapic.c b/hypervisor/arch/x86/guest/vioapic.c
index 2ad77e4b9..a9cbbd670 100644
--- a/hypervisor/arch/x86/guest/vioapic.c
+++ b/hypervisor/arch/x86/guest/vioapic.c
@@ -596,32 +596,31 @@ vioapic_pincount(struct vm *vm)
 	}
 }
 
-int vioapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
+int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		__unused void *handler_private_data)
 {
 	struct vm *vm = vcpu->vm;
-	uint64_t gpa = mmio->paddr;
+	struct mmio_request *mmio = &io_req->reqs.mmio;
+	uint64_t gpa = mmio->address;
 	int ret = 0;
 
 	/* Note all RW to IOAPIC are 32-Bit in size */
-	if (mmio->access_size == 4U) {
-		uint32_t data = mmio->value;
+	if (mmio->size == 4UL) {
+		uint32_t data = (uint32_t)mmio->value;
 
-		if (mmio->read_write == HV_MEM_IO_READ) {
+		if (mmio->direction == REQUEST_READ) {
 			vioapic_mmio_read(vm, gpa, &data);
 			mmio->value = (uint64_t)data;
-			mmio->mmio_status = MMIO_TRANS_VALID;
-
-		} else if (mmio->read_write == HV_MEM_IO_WRITE) {
+			io_req->processed = REQ_STATE_SUCCESS;
+		} else if (mmio->direction == REQUEST_WRITE) {
 			vioapic_mmio_write(vm, gpa, data);
-
-			mmio->mmio_status = MMIO_TRANS_VALID;
+			io_req->processed = REQ_STATE_SUCCESS;
 		} else {
-			/* Can never happen due to the range of read_write. */
+			/* Can never happen due to the range of direction. */
 		}
 	} else {
 		pr_err("All RW to IOAPIC must be 32-bits in size");
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index c39322ce0..d0a475f83 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -2054,32 +2054,30 @@ vlapic_read_mmio_reg(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
 	return error;
 }
 
-int vlapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
+int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		__unused void *handler_private_data)
 {
-	uint64_t gpa = mmio->paddr;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	uint64_t gpa = mmio_req->address;
 	int ret = 0;
 
 	/* Note all RW to LAPIC are 32-Bit in size */
-	ASSERT(mmio->access_size == 4U,
-		"All RW to LAPIC must be 32-bits in size");
+	ASSERT(mmio_req->size == 4UL, "All RW to LAPIC must be 32-bits in size");
 
-	if (mmio->read_write == HV_MEM_IO_READ) {
+	if (mmio_req->direction == REQUEST_READ) {
 		ret = vlapic_read_mmio_reg(vcpu, gpa,
-				&mmio->value,
-				mmio->access_size);
-		mmio->mmio_status = MMIO_TRANS_VALID;
-
-	} else if (mmio->read_write == HV_MEM_IO_WRITE) {
+				&mmio_req->value,
+				mmio_req->size);
+		io_req->processed = REQ_STATE_SUCCESS;
+	} else if (mmio_req->direction == REQUEST_WRITE) {
 		ret = vlapic_write_mmio_reg(vcpu, gpa,
-				mmio->value,
-				mmio->access_size);
-
-		mmio->mmio_status = MMIO_TRANS_VALID;
+				mmio_req->value,
+				mmio_req->size);
+		io_req->processed = REQ_STATE_SUCCESS;
 	} else {
-		/* Can never happen due to the range of mmio->read_write. */
+		/* Can never happen due to the range of mmio_req->direction. */
 	}
 
 	return ret;
@@ -2354,7 +2352,7 @@ apicv_inject_pir(struct vlapic *vlapic)
 	if (pirval != 0UL) {
 		rvi = pirbase + fls64(pirval);
 
-		intr_status_old = 0xFFFFU & 
+		intr_status_old = 0xFFFFU &
 				exec_vmread16(VMX_GUEST_INTR_STATUS);
 
 		intr_status_new = (intr_status_old & 0xFF00U) | rvi;
@@ -2371,6 +2369,7 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
 	uint32_t offset = 0U;
 	uint64_t qual, access_type;
 	struct vlapic *vlapic;
+	struct mmio_request *mmio = &vcpu->req.reqs.mmio;
 
 	qual = vcpu->arch_vcpu.exit_qualification;
 	access_type = APIC_ACCESS_TYPE(qual);
@@ -2392,10 +2391,10 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
 
 	if (access_type == 1UL) {
 		if (emulate_instruction(vcpu) == 0) {
-			err = vlapic_write(vlapic, 1, offset, vcpu->mmio.value);
+			err = vlapic_write(vlapic, 1, offset, mmio->value);
 		}
 	} else if (access_type == 0UL) {
-		err = vlapic_read(vlapic, 1, offset, &vcpu->mmio.value);
+		err = vlapic_read(vlapic, 1, offset, &mmio->value);
 		if (err < 0) {
 			return err;
 		}
diff --git a/hypervisor/arch/x86/io.c b/hypervisor/arch/x86/io.c
index d67623c9c..ff26089db 100644
--- a/hypervisor/arch/x86/io.c
+++ b/hypervisor/arch/x86/io.c
@@ -11,69 +11,77 @@ int dm_emulate_pio_post(struct vcpu *vcpu)
 	uint16_t cur = vcpu->vcpu_id;
 	int cur_context = vcpu->arch_vcpu.cur_context;
 	union vhm_request_buffer *req_buf = NULL;
-	uint32_t mask =
-		0xFFFFFFFFUL >> (32U - (8U * vcpu->req.reqs.pio_request.size));
+	struct io_request *io_req = &vcpu->req;
+	struct pio_request *pio_req = &io_req->reqs.pio;
+	uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
 	uint64_t *rax;
+	struct vhm_request *vhm_req;
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+	vhm_req = &req_buf->req_queue[cur];
 	rax = &vcpu->arch_vcpu.contexts[cur_context].guest_cpu_regs.regs.rax;
 
-	vcpu->req.reqs.pio_request.value =
-		req_buf->req_queue[cur].reqs.pio_request.value;
+	io_req->processed = vhm_req->processed;
+	pio_req->value = vhm_req->reqs.pio.value;
 
 	/* VHM emulation data already copy to req, mark to free slot now */
-	req_buf->req_queue[cur].valid = false;
+	vhm_req->valid = 0;
 
-	if (req_buf->req_queue[cur].processed != REQ_STATE_SUCCESS) {
+	if (io_req->processed != REQ_STATE_SUCCESS) {
 		return -1;
 	}
 
-	if (vcpu->req.reqs.pio_request.direction == REQUEST_READ) {
-		*rax = ((*rax) & ~mask) |
-			(vcpu->req.reqs.pio_request.value & mask);
+	if (pio_req->direction == REQUEST_READ) {
+		uint64_t value = (uint64_t)pio_req->value;
+		*rax = ((*rax) & ~mask) | (value & mask);
 	}
 
 	return 0;
 }
 
-static void dm_emulate_pio_pre(struct vcpu *vcpu, uint64_t exit_qual,
-		uint32_t sz, uint64_t req_value)
+static void
+dm_emulate_pio_pre(struct vcpu *vcpu, uint64_t exit_qual, uint64_t req_value)
 {
-	vcpu->req.type = REQ_PORTIO;
-	if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) != 0U) {
-		vcpu->req.reqs.pio_request.direction = REQUEST_READ;
-	} else {
-		vcpu->req.reqs.pio_request.direction = REQUEST_WRITE;
-	}
+	struct pio_request *pio_req = &vcpu->req.reqs.pio;
 
-	vcpu->req.reqs.pio_request.address =
-		VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
-	vcpu->req.reqs.pio_request.size = sz;
-	vcpu->req.reqs.pio_request.value = req_value;
+	pio_req->value = req_value;
 }
 
 int io_instr_vmexit_handler(struct vcpu *vcpu)
 {
-	uint32_t sz;
-	uint32_t mask;
-	uint32_t port;
-	int8_t direction;
-	struct vm_io_handler *handler;
 	uint64_t exit_qual;
+	uint64_t mask;
+	uint16_t port, size;
+	struct vm_io_handler *handler;
 	struct vm *vm = vcpu->vm;
+	struct io_request *io_req = &vcpu->req;
+	struct pio_request *pio_req = &io_req->reqs.pio;
 	int cur_context_idx = vcpu->arch_vcpu.cur_context;
 	struct run_context *cur_context;
 	int status = -EINVAL;
 
+	io_req->type = REQ_PORTIO;
+	io_req->processed = REQ_STATE_PENDING;
+
 	cur_context = &vcpu->arch_vcpu.contexts[cur_context_idx];
 
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
-	sz = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1;
-	port = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
-	direction = VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual);
-	mask = 0xfffffffful >> (32U - (8U * sz));
+	pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
+	pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
+	if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
+		pio_req->direction = REQUEST_WRITE;
+	} else {
+		pio_req->direction = REQUEST_READ;
+	}
 
-	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION, port, (uint32_t)direction, sz,
+	size = (uint16_t)pio_req->size;
+	port = (uint16_t)pio_req->address;
+	mask = 0xffffffffUL >> (32U - 8U * size);
+
+	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION,
+		(uint32_t)port,
+		(uint32_t)pio_req->direction,
+		(uint32_t)size,
 		(uint32_t)cur_context_idx);
 
 	/*
@@ -88,27 +96,27 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
 			handler; handler = handler->next) {
 
 		if ((port >= (handler->desc.addr + handler->desc.len)) ||
-				((port + sz) <= handler->desc.addr)) {
+				(port + size <= handler->desc.addr)) {
 			continue;
-		} else if (!((port >= handler->desc.addr) && ((port + sz)
+		} else if (!((port >= handler->desc.addr) && ((port + size)
 				<= (handler->desc.addr + handler->desc.len)))) {
-			pr_fatal("Err:IO, port 0x%04x, size=%u spans devices",
-					port, sz);
+			pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
+					port, size);
 			status = -EIO;
 			break;
 		} else {
 			struct cpu_gp_regs *regs =
 					&cur_context->guest_cpu_regs.regs;
 
-			if (direction == 0) {
-				handler->desc.io_write(handler, vm, port, sz,
+			if (pio_req->direction == REQUEST_WRITE) {
+				handler->desc.io_write(handler, vm, port, size,
 						regs->rax);
 
 				pr_dbg("IO write on port %04x, data %08x", port,
 						regs->rax & mask);
 			} else {
 				uint32_t data = handler->desc.io_read(handler,
-						vm, port, sz);
+						vm, port, size);
 
 				regs->rax &= ~mask;
 				regs->rax |= data & mask;
@@ -123,15 +131,15 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
 
 	/* Go for VHM */
 	if (status == -EINVAL) {
-		uint64_t *rax = &cur_context->guest_cpu_regs.regs.rax;
+		uint64_t rax = cur_context->guest_cpu_regs.regs.rax;
 
-		(void)memset(&vcpu->req, 0, sizeof(struct vhm_request));
-		dm_emulate_pio_pre(vcpu, exit_qual, sz, *rax);
-		status = acrn_insert_request_wait(vcpu, &vcpu->req);
+		dm_emulate_pio_pre(vcpu, exit_qual, rax);
+		status = acrn_insert_request_wait(vcpu, io_req);
 
 		if (status != 0) {
 			pr_fatal("Err:IO %s access to port 0x%04x, size=%u",
-				(direction != 0) ? "read" : "write", port, sz);
+				(pio_req->direction != REQUEST_READ) ? "read" : "write",
+				port, size);
 		}
 	}
diff --git a/hypervisor/common/io_request.c b/hypervisor/common/io_request.c
index 82b123775..4067c9f50 100644
--- a/hypervisor/common/io_request.c
+++ b/hypervisor/common/io_request.c
@@ -32,20 +32,20 @@ static void acrn_print_request(uint16_t vcpu_id, struct vhm_request *req)
 		dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=MMIO]", vcpu_id);
 		dev_dbg(ACRN_DBG_IOREQUEST,
 			"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
-			req->reqs.mmio_request.address,
-			req->reqs.mmio_request.direction,
-			req->reqs.mmio_request.size,
-			req->reqs.mmio_request.value,
+			req->reqs.mmio.address,
+			req->reqs.mmio.direction,
+			req->reqs.mmio.size,
+			req->reqs.mmio.value,
 			req->processed);
 		break;
 	case REQ_PORTIO:
 		dev_dbg(ACRN_DBG_IOREQUEST, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
 		dev_dbg(ACRN_DBG_IOREQUEST,
 			"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
-			req->reqs.pio_request.address,
-			req->reqs.pio_request.direction,
-			req->reqs.pio_request.size,
-			req->reqs.pio_request.value,
+			req->reqs.pio.address,
+			req->reqs.pio.direction,
+			req->reqs.pio.size,
+			req->reqs.pio.value,
 			req->processed);
 		break;
 	default:
@@ -55,16 +55,19 @@ static void acrn_print_request(uint16_t vcpu_id, struct vhm_request *req)
 	}
 }
 
-int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
+int32_t
+acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
 {
 	union vhm_request_buffer *req_buf = NULL;
+	struct vhm_request *vhm_req;
 	uint16_t cur;
 
-	ASSERT(sizeof(*req) == (4096U/VHM_REQUEST_MAX),
+	ASSERT(sizeof(struct vhm_request) == (4096U/VHM_REQUEST_MAX),
 		"vhm_request page broken!");
 
-	if (vcpu == NULL || req == NULL || vcpu->vm->sw.io_shared_page == NULL) {
+	if (vcpu == NULL || io_req == NULL ||
+			vcpu->vm->sw.io_shared_page == NULL) {
 		return -EINVAL;
 	}
 
@@ -72,8 +75,11 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
 	/* ACRN insert request to VHM and inject upcall */
 	cur = vcpu->vcpu_id;
-	(void)memcpy_s(&req_buf->req_queue[cur], sizeof(struct vhm_request),
-		req, sizeof(struct vhm_request));
+	vhm_req = &req_buf->req_queue[cur];
+	vhm_req->type = io_req->type;
+	vhm_req->processed = io_req->processed;
+	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
+		&io_req->reqs, sizeof(union vhm_io_request));
 
 	/* pause vcpu, wait for VHM to handle the MMIO request
 	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we
@@ -87,9 +93,9 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
 	 * before we perform upcall.
 	 * because VHM can work in pulling mode without wait for upcall
 	 */
-	req_buf->req_queue[cur].valid = 1;
+	vhm_req->valid = 1;
 
-	acrn_print_request(vcpu->vcpu_id, &req_buf->req_queue[cur]);
+	acrn_print_request(vcpu->vcpu_id, vhm_req);
 
 	/* signal VHM */
 	fire_vhm_interrupt();
@@ -109,24 +115,24 @@ static void _get_req_info_(struct vhm_request *req, int *id, char *type,
 	switch (req->type) {
 	case REQ_PORTIO:
 		(void)strcpy_s(type, 16U, "PORTIO");
-		if (req->reqs.pio_request.direction == REQUEST_READ) {
+		if (req->reqs.pio.direction == REQUEST_READ) {
 			(void)strcpy_s(dir, 16U, "READ");
 		} else {
 			(void)strcpy_s(dir, 16U, "WRITE");
 		}
-		*addr = req->reqs.pio_request.address;
-		*val = req->reqs.pio_request.value;
+		*addr = req->reqs.pio.address;
+		*val = req->reqs.pio.value;
 		break;
 	case REQ_MMIO:
 	case REQ_WP:
 		(void)strcpy_s(type, 16U, "MMIO/WP");
-		if (req->reqs.mmio_request.direction == REQUEST_READ) {
+		if (req->reqs.mmio.direction == REQUEST_READ) {
 			(void)strcpy_s(dir, 16U, "READ");
 		} else {
 			(void)strcpy_s(dir, 16U, "WRITE");
 		}
-		*addr = req->reqs.mmio_request.address;
-		*val = req->reqs.mmio_request.value;
+		*addr = req->reqs.mmio.address;
+		*val = req->reqs.mmio.value;
 		break;
 		break;
 	default:
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 8a2d11dbe..e0b258c25 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -250,8 +250,7 @@ struct vcpu {
 	uint32_t running; /* vcpu is picked up and run? */
 	uint32_t ioreq_pending; /* ioreq is ongoing or not? */
 
-	struct vhm_request req; /* used by io/ept emulation */
-	struct mem_io mmio; /* used by io/ept emulation */
+	struct io_request req; /* used by io/ept emulation */
 
 	/* save guest msr tsc aux register.
 	 * Before VMENTRY, save guest MSR_TSC_AUX to this fields.
diff --git a/hypervisor/include/arch/x86/guest/vioapic.h b/hypervisor/include/arch/x86/guest/vioapic.h
index 2db1b5bd9..e196b7e0b 100644
--- a/hypervisor/include/arch/x86/guest/vioapic.h
+++ b/hypervisor/include/arch/x86/guest/vioapic.h
@@ -52,7 +52,7 @@ void vioapic_mmio_read(struct vm *vm, uint64_t gpa, uint32_t *rval);
 uint8_t vioapic_pincount(struct vm *vm);
 void vioapic_process_eoi(struct vm *vm, uint32_t vector);
 bool vioapic_get_rte(struct vm *vm, uint8_t pin, union ioapic_rte *rte);
-int vioapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
+int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		void *handler_private_data);
 
 #ifdef HV_DEBUG
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index 9f1a96441..f6449473c 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -111,7 +111,7 @@ void vlapic_set_tmr_one_vec(struct vlapic *vlapic, uint32_t delmode,
 
 void vlapic_apicv_batch_set_tmr(struct vlapic *vlapic);
 
-int vlapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
+int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		void *handler_private_data);
 
 uint32_t vlapic_get_id(struct vlapic *vlapic);
diff --git a/hypervisor/include/arch/x86/ioreq.h b/hypervisor/include/arch/x86/ioreq.h
index c82606e76..1e946cc34 100644
--- a/hypervisor/include/arch/x86/ioreq.h
+++ b/hypervisor/include/arch/x86/ioreq.h
@@ -8,6 +8,20 @@
 #define IOREQ_H
 
 #include
+#include
+
+/* Internal representation of a I/O request. */
+struct io_request {
+	/** Type of the request (PIO, MMIO, etc). Refer to vhm_request. */
+	uint32_t type;
+
+	/** Status of request handling. Written by request handlers and read by
+	 * the I/O emulation framework. Refer to vhm_request. */
+	int32_t processed;
+
+	/** Details of this request in the same format as vhm_request.
+	 */
+	union vhm_io_request reqs;
+};
 
 /* Definition of a IO port range */
 struct vm_io_range {
@@ -80,29 +94,9 @@ struct vm_io_handler {
 #define IO_ATTR_RW 1U
 #define IO_ATTR_NO_ACCESS 2U
 
-/* MMIO memory access types */
-enum mem_io_type {
-	HV_MEM_IO_READ = 0,
-	HV_MEM_IO_WRITE,
-};
-
-/* MMIO emulation related structures */
-#define MMIO_TRANS_VALID 1U
-#define MMIO_TRANS_INVALID 0U
-struct mem_io {
-	uint64_t paddr; /* Physical address being accessed */
-	enum mem_io_type read_write; /* 0 = read / 1 = write operation */
-	uint8_t access_size; /* Access size being emulated */
-	uint8_t sign_extend_read; /* 1 if sign extension required for read */
-	uint64_t value; /* Value read or value to write */
-	uint8_t mmio_status; /* Indicates if this MMIO transaction is valid */
-	/* Used to store emulation context for this mmio transaction */
-	void *private_data;
-};
-
 /* Typedef for MMIO handler and range check routine */
 struct mmio_request;
-typedef int (*hv_mem_io_handler_t)(struct vcpu *, struct mem_io *, void *);
+typedef int (*hv_mem_io_handler_t)(struct vcpu *, struct io_request *, void *);
 
 /* Structure for MMIO handler node */
 struct mem_io_node {
@@ -130,6 +124,6 @@ void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
 		uint64_t end);
 int dm_emulate_mmio_post(struct vcpu *vcpu);
 
-int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
+int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *req);
 
 #endif /* IOREQ_H */
diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index 82157e760..074a2761a 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -83,6 +83,13 @@ struct pci_request {
 	int32_t reg;
 } __aligned(8);
 
+union vhm_io_request {
+	struct pio_request pio;
+	struct pci_request pci;
+	struct mmio_request mmio;
+	int64_t reserved1[8];
+};
+
 /* vhm_request are 256Bytes aligned */
 struct vhm_request {
 	/* offset: 0bytes - 63bytes */
@@ -90,12 +97,7 @@ struct vhm_request {
 	int32_t reserved0[15];
 
 	/* offset: 64bytes-127bytes */
-	union {
-		struct pio_request pio_request;
-		struct pci_request pci_request;
-		struct mmio_request mmio_request;
-		int64_t reserved1[8];
-	} reqs;
+	union vhm_io_request reqs;
 
 	/* True: valid req which need VHM to process.
	 * ACRN write, VHM read only
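
For readers new to the unified layout, the following standalone sketch (not part of the patch above) shows how an MMIO handler consumes the new struct io_request after this change. The type and macro definitions are simplified stand-ins mirroring the names used in the diff (io_request, vhm_io_request, mmio_request, REQUEST_READ/REQUEST_WRITE, REQ_STATE_*); their exact values and layouts in ACRN may differ, and the struct vcpu argument of a real hv_mem_io_handler_t is omitted here.

/*
 * Illustrative sketch only: simplified stand-ins for the unified I/O
 * request types introduced by this change. Field names follow the diff;
 * the macro values, field widths and the dropped vcpu parameter are
 * assumptions made so the example is self-contained.
 */
#include <stdint.h>
#include <stdio.h>

#define REQUEST_READ        0U
#define REQUEST_WRITE       1U
#define REQ_MMIO            1U   /* value assumed for this sketch */
#define REQ_STATE_PENDING   0
#define REQ_STATE_SUCCESS   1

struct mmio_request {
	uint32_t direction;   /* REQUEST_READ or REQUEST_WRITE */
	uint64_t address;     /* guest physical address */
	uint64_t size;        /* access width in bytes */
	uint64_t value;       /* data read or to be written */
};

union vhm_io_request {
	struct mmio_request mmio;
	int64_t reserved1[8];
};

/* Internal representation carried in struct vcpu after this patch. */
struct io_request {
	uint32_t type;
	int32_t processed;
	union vhm_io_request reqs;
};

/* A handler now receives io_request instead of the old struct mem_io. */
static int toy_mmio_handler(struct io_request *io_req)
{
	struct mmio_request *mmio = &io_req->reqs.mmio;

	if (mmio->direction == REQUEST_READ) {
		mmio->value = 0xabcdUL;   /* pretend device register read */
	}
	io_req->processed = REQ_STATE_SUCCESS;
	return 0;
}

int main(void)
{
	struct io_request req = {
		.type = REQ_MMIO,
		.processed = REQ_STATE_PENDING,
		.reqs.mmio = {
			.direction = REQUEST_READ,
			.address = 0xfec00000UL,
			.size = 4UL,
		},
	};

	(void)toy_mmio_handler(&req);
	printf("value=0x%llx processed=%d\n",
	       (unsigned long long)req.reqs.mmio.value, req.processed);
	return 0;
}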