HV: io: drop REQ_STATE_FAILED
Now that the DM has adopted the new VHM request state transitions, REQ_STATE_FAILED is obsolete: neither VHM nor the kernel mediators will ever set the state to FAILED. This patch drops the definition of REQ_STATE_FAILED in the hypervisor, makes 'processed' unsigned to keep the compiler happy about typing, and simplifies error handling in the following ways.

* (dm_)emulate_(pio|mmio)_post no longer return an error code. This relies on the constraint that these functions are only called after an I/O request completes (which is the case in the current design) and on the assumption that handlers/VHM/DM always provide a value for reads (typically all 1's if the requested address is invalid).

* emulate_io() now returns the positive value IOREQ_PENDING to indicate that the request has been sent to VHM. This mitigates a potential race between dm_emulate_pio() and pio_instr_vmexit_handler() which could cause emulate_pio_post() to be called twice for the same request (see the sketch below).

* Remove the 'processed' member in io_request. Previously it mirrored the state of the VHM request, which terminated at either COMPLETE or FAILED. With the FAILED state gone, the terminal state is always COMPLETE, so the mirrored 'processed' member is no longer useful.

Note that emulate_instruction() will always succeed after an upcoming reshuffle, and this patch adopts that assumption in advance. This does no harm, since its return value is not currently checked.

This patch makes it explicit that I/O emulation is not expected to fail. One issue remains, however, for non-aligned accesses that cross a boundary. The hypervisor, VHM and DM currently adopt different policies:

* Hypervisor: injects #GP when it detects that the access crosses a boundary.
* VHM: delivers the access to the DM if it does not fall completely within the range of a single client.
* DM: picks a handler covering only part of the to-be-accessed region, which can trigger an assertion failure.

A high-level design covering all these components (in addition to instruction emulation) is needed to resolve this, so this patch does not address that issue.

Tracked-On: #875
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
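As a rough illustration of the new submission path (a hypothetical sketch, not code from this patch: the handler body, the vcpu->req field and the decode step are assumptions; emulate_io(), emulate_pio_post() and IOREQ_PENDING are the names used above):

/* Hypothetical sketch: how a PIO vmexit handler could consume the new
 * return convention of emulate_io(). */
int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
{
        struct io_request *io_req = &vcpu->req;  /* field name assumed */
        int32_t status;

        /* ... decode the exit qualification and fill io_req here ... */

        status = emulate_io(vcpu, io_req);
        if (status == 0) {
                /* Completed inside the hypervisor; commit the result now.
                 * emulate_pio_post() is void and cannot fail. */
                emulate_pio_post(vcpu, io_req);
        } else if (status == IOREQ_PENDING) {
                /* Delivered to VHM; emulate_io_post() runs once the request
                 * reaches COMPLETE, so the handler must not call
                 * emulate_pio_post() here. This closes the race noted above. */
                status = 0;
        }

        return status;
}

With the post handlers declared void, the only decision left in the vmexit path is whether completion happens now or later; error propagation from the post-processing stage is gone by design.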
@@ -10,15 +10,15 @@
 #include <types.h>
 #include <acrn_common.h>
 
+/* The return value of emulate_io() indicating the I/O request is delivered to
+ * VHM but not finished yet. */
+#define IOREQ_PENDING 1
+
 /* Internal representation of a I/O request. */
 struct io_request {
         /** Type of the request (PIO, MMIO, etc). Refer to vhm_request. */
         uint32_t type;
 
-        /** Status of request handling. Written by request handlers and read by
-         * the I/O emulation framework. Refer to vhm_request. */
-        int32_t processed;
-
         /** Details of this request in the same format as vhm_request. */
         union vhm_io_request reqs;
 };
@@ -122,8 +122,8 @@ int register_mmio_emulation_handler(struct vm *vm,
                 uint64_t end, void *handler_private_data);
 void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
                 uint64_t end);
-int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
-int32_t dm_emulate_mmio_post(struct vcpu *vcpu);
+void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
+void dm_emulate_mmio_post(struct vcpu *vcpu);
 
 int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
 void emulate_io_post(struct vcpu *vcpu);
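On the completion side, under the same caveat (emulate_io_post() internals are not part of this hunk; the dispatch below and the REQ_PORTIO/REQ_MMIO type values are assumptions for illustration), the post-processing entry point can now be a straight dispatch with no error handling:

/* Hypothetical sketch: completion path once VHM marks the request COMPLETE. */
void emulate_io_post(struct vcpu *vcpu)
{
        struct io_request *io_req = &vcpu->req;  /* field name assumed */

        /* REQ_STATE_FAILED no longer exists, so COMPLETE is the only
         * terminal state and the post handlers have nothing to report. */
        switch (io_req->type) {
        case REQ_PORTIO:
                dm_emulate_pio_post(vcpu);
                break;
        case REQ_MMIO:
                dm_emulate_mmio_post(vcpu);
                break;
        default:
                /* Other request types need no post-processing in this sketch. */
                break;
        }
}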