Mirror of https://github.com/projectacrn/acrn-hypervisor.git
HV: io: drop REQ_STATE_FAILED
Now that the DM has adopted the new VHM request state transitions, REQ_STATE_FAILED is obsolete: neither VHM nor kernel mediators will set the state to FAILED anymore. This patch drops the definition of REQ_STATE_FAILED in the hypervisor, makes "processed" unsigned to keep the compiler happy about typing, and simplifies error handling in the following ways.

* (dm_)emulate_(pio|mmio)_post no longer returns an error code, by introducing a constraint that these functions must be called only after an I/O request completes (which is the case in the current design) and by assuming that handlers/VHM/DM always provide a value for reads (typically all 1's if the requested address is invalid).

* emulate_io() now returns a positive value, IOREQ_PENDING, to indicate that the request has been sent to VHM. This mitigates a potential race between dm_emulate_pio() and pio_instr_vmexit_handler() which could cause emulate_pio_post() to be called twice for the same request.

* The "processed" member in io_request is removed. Previously it mirrored the state of the VHM request, which terminated at either COMPLETE or FAILED. With the FAILED state gone, the terminal state is always COMPLETE, so the mirrored "processed" member is no longer useful.

Note that emulate_instruction() will always succeed after a reshuffle, and this patch takes that assumption in advance. This does no harm, as the returned value is not currently handled.

This patch makes it explicit that I/O emulation is not expected to fail.

One issue remains, though, for non-aligned accesses that cross a boundary. Currently the hypervisor, VHM and DM adopt different policies:

* Hypervisor: inject #GP if it detects that the access crosses a boundary.
* VHM: deliver the request to DM if the access does not fall completely within the range of a client.
* DM: a handler covering only part of the accessed region may be picked, and an assertion failure can be triggered.

A high-level design covering all these components (in addition to instruction emulation) is needed for this, so this patch does not yet cover the issue.

Tracked-On: #875
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
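To make the new contract concrete, here is a minimal sketch of the control flow described above, condensed from the two vmexit-handler hunks in the diff that follows. The helper name handle_io_vmexit_sketch is hypothetical, and the snippet assumes the hypervisor's own struct vcpu / struct io_request declarations and PIO post function; it only illustrates how the three return classes of emulate_io() are meant to be consumed.

/* Minimal sketch (hypothetical helper, not part of the patch) of how a
 * vmexit handler consumes emulate_io() after this change:
 *   0             - handled by a hypervisor handler; do post-work now.
 *   IOREQ_PENDING - handed off to VHM; post-work runs on vcpu resume,
 *                   so the handler reports success.
 *   negative      - genuine emulation error, propagated to the caller.
 */
static int32_t handle_io_vmexit_sketch(struct vcpu *vcpu, struct io_request *io_req)
{
	int32_t status = emulate_io(vcpu, io_req);

	if (status == 0) {
		/* Post functions are void now: a completed request always
		 * carries a usable value, so post-processing cannot fail. */
		emulate_pio_post(vcpu, io_req);
	} else if (status == IOREQ_PENDING) {
		/* Not an error: dm_emulate_pio_post() will finish the job
		 * when the vcpu resumes after VHM completes the request. */
		status = 0;
	}

	return status;
}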
@@ -156,7 +156,6 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
 	io_req->type = REQ_MMIO;
-	io_req->processed = REQ_STATE_PENDING;
 
 	/* Specify if read or write operation */
 	if ((exit_qual & 0x2UL) != 0UL) {
@@ -214,14 +213,11 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 
 	status = emulate_io(vcpu, io_req);
 
-	/* io_req is hypervisor-private. For requests sent to VHM,
-	 * io_req->processed will be PENDING till dm_emulate_mmio_post() is
-	 * called on vcpu resume. */
 	if (status == 0) {
-		if (io_req->processed != REQ_STATE_PENDING) {
-			status = emulate_mmio_post(vcpu, io_req);
-		}
-	}
+		emulate_mmio_post(vcpu, io_req);
+	} else if (status == IOREQ_PENDING) {
+		status = 0;
+	}
 
 	return status;
 }
@@ -2097,7 +2097,6 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		/* Can never happen due to the range of mmio_req->direction. */
 	}
 
-	io_req->processed = REQ_STATE_COMPLETE;
 	return ret;
 }
 
@@ -16,35 +16,33 @@ static void complete_ioreq(struct vhm_request *vhm_req)
 
 /**
  * @pre io_req->type == REQ_PORTIO
+ *
+ * @remark This function must be called when \p io_req is completed, after
+ * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * request having transferred to the COMPLETE state.
  */
-static int32_t
+static void
 emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
 {
-	int32_t status;
 	struct pio_request *pio_req = &io_req->reqs.pio;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
 
-	if (io_req->processed == REQ_STATE_COMPLETE) {
-		if (pio_req->direction == REQUEST_READ) {
-			uint64_t value = (uint64_t)pio_req->value;
-			int32_t context_idx = vcpu->arch_vcpu.cur_context;
-			uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
+	if (pio_req->direction == REQUEST_READ) {
+		uint64_t value = (uint64_t)pio_req->value;
+		uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
 
-			rax = ((rax) & ~mask) | (value & mask);
-			vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
-		}
-		status = 0;
-	} else {
-		status = -1;
+		rax = ((rax) & ~mask) | (value & mask);
+		vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
 	}
-
-	return status;
 }
 
 /**
  * @pre vcpu->req.type == REQ_PORTIO
+ *
+ * @remark This function must be called after the VHM request corresponding to
+ * \p vcpu being transferred to the COMPLETE state.
  */
-int32_t dm_emulate_pio_post(struct vcpu *vcpu)
+void dm_emulate_pio_post(struct vcpu *vcpu)
 {
 	uint16_t cur = vcpu->vcpu_id;
 	union vhm_request_buffer *req_buf = NULL;
@@ -60,35 +58,33 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
 	/* VHM emulation data already copy to req, mark to free slot now */
 	complete_ioreq(vhm_req);
 
-	return emulate_pio_post(vcpu, io_req);
+	emulate_pio_post(vcpu, io_req);
 }
 
 /**
  * @pre vcpu->req.type == REQ_MMIO
+ *
+ * @remark This function must be called when \p io_req is completed, after
+ * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * request having transferred to the COMPLETE state.
  */
-int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
+void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
 {
-	int32_t ret;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
-	if (io_req->processed == REQ_STATE_COMPLETE) {
-		if (mmio_req->direction == REQUEST_READ) {
-			/* Emulate instruction and update vcpu register set */
-			ret = emulate_instruction(vcpu);
-		} else {
-			ret = 0;
-		}
-	} else {
-		ret = 0;
+	if (mmio_req->direction == REQUEST_READ) {
+		/* Emulate instruction and update vcpu register set */
+		emulate_instruction(vcpu);
 	}
-
-	return ret;
 }
 
 /**
  * @pre vcpu->req.type == REQ_MMIO
+ *
+ * @remark This function must be called after the VHM request corresponding to
+ * \p vcpu being transferred to the COMPLETE state.
  */
-int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
+void dm_emulate_mmio_post(struct vcpu *vcpu)
 {
 	uint16_t cur = vcpu->vcpu_id;
 	struct io_request *io_req = &vcpu->req;
@@ -104,7 +100,7 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
 	/* VHM emulation data already copy to req, mark to free slot now */
 	complete_ioreq(vhm_req);
 
-	return emulate_mmio_post(vcpu, io_req);
+	emulate_mmio_post(vcpu, io_req);
 }
 
 #ifdef CONFIG_PARTITION_MODE
@@ -123,15 +119,12 @@ void emulate_io_post(struct vcpu *vcpu)
 {
 	union vhm_request_buffer *req_buf;
 	struct vhm_request *vhm_req;
-	struct io_request *io_req = &vcpu->req;
 
 	req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
 	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
-	io_req->processed = atomic_load32(&vhm_req->processed);
 
 	if ((vhm_req->valid == 0) ||
-		((io_req->processed != REQ_STATE_COMPLETE) &&
-		 (io_req->processed != REQ_STATE_FAILED))) {
+		(atomic_load32(&vhm_req->processed) != REQ_STATE_COMPLETE)) {
 		return;
 	}
 
@@ -205,7 +198,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 			pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
 					port, size);
 			status = -EIO;
-			io_req->processed = REQ_STATE_FAILED;
 			break;
 		} else {
 			if (pio_req->direction == REQUEST_WRITE) {
@@ -221,9 +213,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 				pr_dbg("IO read on port %04x, data %08x",
 						port, pio_req->value);
 			}
-			/* TODO: failures in the handlers should be reflected
-			 * here. */
-			io_req->processed = REQ_STATE_COMPLETE;
 			status = 0;
 			break;
 		}
@@ -266,7 +255,6 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
 		} else if (!((address >= base) && (address + size <= end))) {
 			pr_fatal("Err MMIO, address:0x%llx, size:%x",
 					address, size);
-			io_req->processed = REQ_STATE_FAILED;
 			return -EIO;
 		} else {
 			/* Handle this MMIO operation */
@@ -284,6 +272,7 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
  * deliver to VHM.
  *
  * @return 0 - Successfully emulated by registered handlers.
+ * @return IOREQ_PENDING - The I/O request is delivered to VHM.
  * @return -EIO - The request spans multiple devices and cannot be emulated.
  * @return Negative on other errors during emulation.
  */
@@ -303,7 +292,6 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
 	default:
 		/* Unknown I/O request type */
 		status = -EINVAL;
-		io_req->processed = REQ_STATE_FAILED;
 		break;
 	}
 
@@ -329,6 +317,8 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
 			pr_fatal("Err:IO %s access to port 0x%04lx, size=%lu",
 				(pio_req->direction != REQUEST_READ) ? "read" : "write",
 				pio_req->address, pio_req->size);
+		} else {
+			status = IOREQ_PENDING;
 		}
 #endif
 	}
@@ -347,7 +337,6 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
 	io_req->type = REQ_PORTIO;
-	io_req->processed = REQ_STATE_PENDING;
 	pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
 	pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
 	if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
@@ -365,13 +354,10 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
 
 	status = emulate_io(vcpu, io_req);
 
-	/* io_req is hypervisor-private. For requests sent to VHM,
-	 * io_req->processed will be PENDING till dm_emulate_pio_post() is
-	 * called on vcpu resume. */
 	if (status == 0) {
-		if (io_req->processed != REQ_STATE_PENDING) {
-			status = emulate_pio_post(vcpu, io_req);
-		}
+		emulate_pio_post(vcpu, io_req);
+	} else if (status == IOREQ_PENDING) {
+		status = 0;
 	}
 
 	return status;