hv: dm: Use new I/O request data structures
struct vhm_request -> struct acrn_io_request
union vhm_request_buffer -> struct acrn_io_request_buffer
struct pio_request -> struct acrn_pio_request
struct mmio_request -> struct acrn_mmio_request
struct ioreq_notify -> struct acrn_ioreq_notify

VHM_REQ_PIO_INVAL -> IOREQ_PIO_INVAL
VHM_REQ_MMIO_INVAL -> IOREQ_MMIO_INVAL
REQ_PORTIO -> ACRN_IOREQ_TYPE_PORTIO
REQ_MMIO -> ACRN_IOREQ_TYPE_MMIO
REQ_PCICFG -> ACRN_IOREQ_TYPE_PCICFG
REQ_WP -> ACRN_IOREQ_TYPE_WP

REQUEST_READ -> ACRN_IOREQ_DIR_READ
REQUEST_WRITE -> ACRN_IOREQ_DIR_WRITE
REQ_STATE_PROCESSING -> ACRN_IOREQ_STATE_PROCESSING
REQ_STATE_PENDING -> ACRN_IOREQ_STATE_PENDING
REQ_STATE_COMPLETE -> ACRN_IOREQ_STATE_COMPLETE
REQ_STATE_FREE -> ACRN_IOREQ_STATE_FREE

IC_CREATE_IOREQ_CLIENT -> ACRN_IOCTL_CREATE_IOREQ_CLIENT
IC_DESTROY_IOREQ_CLIENT -> ACRN_IOCTL_DESTROY_IOREQ_CLIENT
IC_ATTACH_IOREQ_CLIENT -> ACRN_IOCTL_ATTACH_IOREQ_CLIENT
IC_NOTIFY_REQUEST_FINISH -> ACRN_IOCTL_NOTIFY_REQUEST_FINISH
IC_CLEAR_VM_IOREQ -> ACRN_IOCTL_CLEAR_VM_IOREQ

HYPERVISOR_CALLBACK_VHM_VECTOR -> HYPERVISOR_CALLBACK_HSM_VECTOR

arch_fire_vhm_interrupt() -> arch_fire_hsm_interrupt()
get_vhm_notification_vector() -> get_hsm_notification_vector()
set_vhm_notification_vector() -> set_hsm_notification_vector()
acrn_vhm_notification_vector -> acrn_hsm_notification_vector
get_vhm_req_state() -> get_io_req_state()
set_vhm_req_state() -> set_io_req_state()

The following structures differ slightly from their former counterparts:
    struct acrn_ioreq_notify
    struct acrn_io_request

Tracked-On: #6282
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
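To make the rename concrete, the sketch below shows an affected call site written against the new names, loosely modeled on the port-I/O request filled in the diff that follows. The struct layouts, constant values, and the helper fill_pio_write() are reduced stand-ins invented for illustration, not the actual ACRN headers or APIs; only the identifier names follow the mapping above.

#include <stdint.h>

/* Stand-in constants; the real values live in ACRN's public headers. */
#define ACRN_IOREQ_TYPE_PORTIO  0U  /* was REQ_PORTIO */
#define ACRN_IOREQ_DIR_WRITE    1U  /* was REQUEST_WRITE */

/* Reduced stand-in for struct acrn_pio_request (was struct pio_request). */
struct acrn_pio_request {
	uint32_t direction;
	uint64_t address;
	uint64_t size;
	uint32_t value;
};

/* Reduced stand-in for the hypervisor-internal struct io_request. */
struct io_request {
	uint32_t io_type;
	union {
		struct acrn_pio_request pio_request;  /* was reqs.pio */
	} reqs;
};

/* Fill a port-I/O write request using the new names; an old call site
 * would have set io_type = REQ_PORTIO and the reqs.pio.* fields instead. */
void fill_pio_write(struct io_request *io_req, uint64_t port,
		    uint64_t size, uint32_t value)
{
	io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
	io_req->reqs.pio_request.direction = ACRN_IOREQ_DIR_WRITE;
	io_req->reqs.pio_request.address = port;
	io_req->reqs.pio_request.size = size;
	io_req->reqs.pio_request.value = value;
}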
@@ -600,7 +600,7 @@ static void vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
  */
 static inline void vie_mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
 {
-	*rval = vcpu->req.reqs.mmio.value;
+	*rval = vcpu->req.reqs.mmio_request.value;
 }

 /*
@@ -608,7 +608,7 @@ static inline void vie_mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
  */
 static inline void vie_mmio_write(struct acrn_vcpu *vcpu, uint64_t wval)
 {
-	vcpu->req.reqs.mmio.value = wval;
+	vcpu->req.reqs.mmio_request.value = wval;
 }

 static void vie_calc_bytereg(const struct instr_emul_vie *vie,
@@ -1087,7 +1087,7 @@ static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie

 	/* update the Memory Operand byte size if necessary */
 	opsize = ((vie->op.op_flags & VIE_OP_F_BYTE_OP) != 0U) ? 1U : vie->opsize;
-	is_mmio_write = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE);
+	is_mmio_write = (vcpu->req.reqs.mmio_request.direction == ACRN_IOREQ_DIR_WRITE);

 	/*
 	 * XXX although the MOVS instruction is only supposed to be used with
@@ -2325,7 +2325,7 @@ static int32_t instr_check_gva(struct acrn_vcpu *vcpu, enum vm_cpu_mode cpu_mode
 		}
 		ret = -EFAULT;
 	} else {
-		err_code = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE) ? PAGE_FAULT_WR_FLAG : 0U;
+		err_code = (vcpu->req.reqs.mmio_request.direction == ACRN_IOREQ_DIR_WRITE) ? PAGE_FAULT_WR_FLAG : 0U;

 		ret = gva2gpa(vcpu, gva, &gpa, &err_code);
 		if (ret < 0) {
@@ -143,7 +143,7 @@ static inline uint8_t get_slp_typx(uint32_t pm1_cnt)

 static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
 {
-	struct pio_request *pio_req = &vcpu->req.reqs.pio;
+	struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;

 	pio_req->value = pio_read(addr, width);

@@ -314,7 +314,7 @@ static void register_rt_vm_pm1a_ctl_handler(struct acrn_vm *vm)
  */
 static bool prelaunched_vm_sleep_io_read(struct acrn_vcpu *vcpu, __unused uint16_t addr, __unused size_t width)
 {
-	vcpu->req.reqs.pio.value = 0U;
+	vcpu->req.reqs.pio_request.value = 0U;

 	return true;
 }
@@ -2383,7 +2383,7 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t offset;
 	uint64_t qual, access_type;
 	struct acrn_vlapic *vlapic;
-	struct mmio_request *mmio;
+	struct acrn_mmio_request *mmio;

 	qual = vcpu->arch.exit_qualification;
 	access_type = apic_access_type(qual);
@@ -2407,7 +2407,7 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 			(decode_instruction(vcpu) >= 0)) {
 		vlapic = vcpu_vlapic(vcpu);
 		offset = (uint32_t)apic_access_offset(qual);
-		mmio = &vcpu->req.reqs.mmio;
+		mmio = &vcpu->req.reqs.mmio_request;
 		if (access_type == TYPE_LINEAR_APIC_INST_WRITE) {
 			err = emulate_instruction(vcpu);
 			if (err == 0) {
@@ -22,11 +22,11 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
 		struct io_request *io_req = &vcpu->req;

 		/* Device model emulates PM1A for post-launched VMs */
-		io_req->io_type = REQ_PORTIO;
-		io_req->reqs.pio.direction = REQUEST_WRITE;
-		io_req->reqs.pio.address = VIRTUAL_PM1A_CNT_ADDR;
-		io_req->reqs.pio.size = 2UL;
-		io_req->reqs.pio.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));
+		io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
+		io_req->reqs.pio_request.direction = ACRN_IOREQ_DIR_WRITE;
+		io_req->reqs.pio_request.address = VIRTUAL_PM1A_CNT_ADDR;
+		io_req->reqs.pio_request.size = 2UL;
+		io_req->reqs.pio_request.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));

 		/* Inject pm1a S5 request to SOS to shut down the guest */
 		(void)emulate_io(vcpu, io_req);
@@ -76,7 +76,7 @@ static bool handle_reset_reg_read(struct acrn_vcpu *vcpu, __unused uint16_t addr
 		 * - reset control register 0xcf9: hide this from guests for now.
 		 * - FADT reset register: the read behavior is not defined in spec, keep it simple to return all '1'.
 		 */
-		vcpu->req.reqs.pio.value = ~0U;
+		vcpu->req.reqs.pio_request.value = ~0U;
 	}

 	return ret;
@@ -139,10 +139,10 @@ static bool handle_kb_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
 {
 	if (is_sos_vm(vcpu->vm) && (bytes == 1U)) {
 		/* In case i8042 is defined as ACPI PNP device in BIOS, HV need expose physical 0x64 port. */
-		vcpu->req.reqs.pio.value = pio_read8(addr);
+		vcpu->req.reqs.pio_request.value = pio_read8(addr);
 	} else {
 		/* ACRN will not expose kbd controller to the guest in this case. */
-		vcpu->req.reqs.pio.value = ~0U;
+		vcpu->req.reqs.pio_request.value = ~0U;
 	}
 	return true;
 }
@@ -18,7 +18,7 @@
 #include <trace.h>
 #include <logmsg.h>

-void arch_fire_vhm_interrupt(void)
+void arch_fire_hsm_interrupt(void)
 {
 	/*
 	 * use vLAPIC to inject vector to SOS vcpu 0 if vlapic is enabled
@@ -30,25 +30,25 @@ void arch_fire_vhm_interrupt(void)
 	sos_vm = get_sos_vm();
 	vcpu = vcpu_from_vid(sos_vm, BSP_CPU_ID);

-	vlapic_set_intr(vcpu, get_vhm_notification_vector(), LAPIC_TRIG_EDGE);
+	vlapic_set_intr(vcpu, get_hsm_notification_vector(), LAPIC_TRIG_EDGE);
 }

 /**
  * @brief General complete-work for port I/O emulation
  *
- * @pre io_req->io_type == REQ_PORTIO
+ * @pre io_req->io_type == ACRN_IOREQ_TYPE_PORTIO
  *
  * @remark This function must be called when \p io_req is completed, after
- * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * either a previous call to emulate_io() returning 0 or the corresponding IO
 * request having transferred to the COMPLETE state.
  */
 void
 emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
-	const struct pio_request *pio_req = &io_req->reqs.pio;
+	const struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - (8UL * pio_req->size));

-	if (pio_req->direction == REQUEST_READ) {
+	if (pio_req->direction == ACRN_IOREQ_DIR_READ) {
 		uint64_t value = (uint64_t)pio_req->value;
 		uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);

@@ -70,19 +70,19 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t mask;
 	int32_t cur_context_idx = vcpu->arch.cur_context;
 	struct io_request *io_req = &vcpu->req;
-	struct pio_request *pio_req = &io_req->reqs.pio;
+	struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;

 	exit_qual = vcpu->arch.exit_qualification;

-	io_req->io_type = REQ_PORTIO;
+	io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
 	pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
 	pio_req->address = vm_exit_io_instruction_port_number(exit_qual);
 	if (vm_exit_io_instruction_access_direction(exit_qual) == 0UL) {
 		mask = 0xFFFFFFFFU >> (32U - (8U * pio_req->size));
-		pio_req->direction = REQUEST_WRITE;
+		pio_req->direction = ACRN_IOREQ_DIR_WRITE;
 		pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX) & mask;
 	} else {
-		pio_req->direction = REQUEST_READ;
+		pio_req->direction = ACRN_IOREQ_DIR_READ;
 	}

 	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION,
@@ -102,7 +102,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint64_t exit_qual;
 	uint64_t gpa;
 	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;

 	/* Handle page fault from guest */
 	exit_qual = vcpu->arch.exit_qualification;
@@ -125,21 +125,21 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 		status = 0;
 	} else {

-		io_req->io_type = REQ_MMIO;
+		io_req->io_type = ACRN_IOREQ_TYPE_MMIO;

 		/* Specify if read or write operation */
 		if ((exit_qual & 0x2UL) != 0UL) {
 			/* Write operation */
-			mmio_req->direction = REQUEST_WRITE;
+			mmio_req->direction = ACRN_IOREQ_DIR_WRITE;
 			mmio_req->value = 0UL;

 			/* XXX: write access while EPT perm RX -> WP */
 			if ((exit_qual & 0x38UL) == 0x28UL) {
-				io_req->io_type = REQ_WP;
+				io_req->io_type = ACRN_IOREQ_TYPE_WP;
 			}
 		} else {
 			/* Read operation */
-			mmio_req->direction = REQUEST_READ;
+			mmio_req->direction = ACRN_IOREQ_DIR_READ;

 			/* TODO: Need to determine how sign extension is determined for
 			 * reads
@@ -160,7 +160,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 	 */

 	/* Determine value being written. */
-	if (mmio_req->direction == REQUEST_WRITE) {
+	if (mmio_req->direction == ACRN_IOREQ_DIR_WRITE) {
 		status = emulate_instruction(vcpu);
 		if (status != 0) {
 			ret = -EFAULT;