hv: dm: Use new I/O request data structures

struct vhm_request		->	struct acrn_io_request
union vhm_request_buffer	->	struct acrn_io_request_buffer
struct pio_request		->	struct acrn_pio_request
struct mmio_request		->	struct acrn_mmio_request
struct ioreq_notify		->	struct acrn_ioreq_notify

VHM_REQ_PIO_INVAL		->	IOREQ_PIO_INVAL
VHM_REQ_MMIO_INVAL		->	IOREQ_MMIO_INVAL
REQ_PORTIO			->	ACRN_IOREQ_TYPE_PORTIO
REQ_MMIO			->	ACRN_IOREQ_TYPE_MMIO
REQ_PCICFG			->	ACRN_IOREQ_TYPE_PCICFG
REQ_WP				->	ACRN_IOREQ_TYPE_WP

REQUEST_READ			->	ACRN_IOREQ_DIR_READ
REQUEST_WRITE			->	ACRN_IOREQ_DIR_WRITE
REQ_STATE_PROCESSING		->	ACRN_IOREQ_STATE_PROCESSING
REQ_STATE_PENDING		->	ACRN_IOREQ_STATE_PENDING
REQ_STATE_COMPLETE		->	ACRN_IOREQ_STATE_COMPLETE
REQ_STATE_FREE			->	ACRN_IOREQ_STATE_FREE

IC_CREATE_IOREQ_CLIENT		->	ACRN_IOCTL_CREATE_IOREQ_CLIENT
IC_DESTROY_IOREQ_CLIENT		->	ACRN_IOCTL_DESTROY_IOREQ_CLIENT
IC_ATTACH_IOREQ_CLIENT		->	ACRN_IOCTL_ATTACH_IOREQ_CLIENT
IC_NOTIFY_REQUEST_FINISH	->	ACRN_IOCTL_NOTIFY_REQUEST_FINISH
IC_CLEAR_VM_IOREQ		->	ACRN_IOCTL_CLEAR_VM_IOREQ
HYPERVISOR_CALLBACK_VHM_VECTOR	->	HYPERVISOR_CALLBACK_HSM_VECTOR

arch_fire_vhm_interrupt()	->	arch_fire_hsm_interrupt()
get_vhm_notification_vector()	->	get_hsm_notification_vector()
set_vhm_notification_vector()	->	set_hsm_notification_vector()
acrn_vhm_notification_vector	->	acrn_hsm_notification_vector
get_vhm_req_state()		->	get_io_req_state()
set_vhm_req_state()		->	set_io_req_state()

The structures below differ slightly from their former counterparts:

  struct acrn_ioreq_notify
  struct acrn_io_request
Tracked-On: #6282
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Author:    Shuo A Liu
Date:      2021-07-07 15:38:07 +08:00
Committed: wenlingz
Parent:    3c66ba7ef5
Commit:    9c910bae44

30 changed files with 307 additions and 304 deletions
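
For orientation before the per-file hunks, a minimal sketch (not part of this commit) of how the DM drives an ioreq client with the renamed ioctls; error handling is elided, and ctx->fd, ctx->vmid and ctx->ioreq_client are the struct vmctx fields used in the vmmapi.c hunks below:

  /* Create and attach an ioreq client, then acknowledge one finished request. */
  ctx->ioreq_client = ioctl(ctx->fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0);
  ioctl(ctx->fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);

  struct acrn_ioreq_notify notify = {
  	.vmid = ctx->vmid,	/* now keyed by VM id, not by client_id */
  	.vcpu = vcpu_id,	/* vcpu_id: whichever vCPU's request was handled */
  };
  ioctl(ctx->fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);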

@@ -87,7 +87,7 @@ register_default_iohandler(int start, int size)
 }
 
 int
-emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
+emulate_inout(struct vmctx *ctx, int *pvcpu, struct acrn_pio_request *pio_request)
 {
 	int bytes, flags, in, port;
 	inout_func_t handler;
@@ -95,7 +95,7 @@ emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
 	int retval;
 
 	bytes = pio_request->size;
-	in = (pio_request->direction == REQUEST_READ);
+	in = (pio_request->direction == ACRN_IOREQ_DIR_READ);
 	port = pio_request->address;
 
 	if ((port + bytes - 1 >= MAX_IOPORTS) ||
@@ -106,7 +106,7 @@ emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
 	flags = inout_handlers[port].flags;
 	arg = inout_handlers[port].arg;
 
-	if (pio_request->direction == REQUEST_READ) {
+	if (pio_request->direction == ACRN_IOREQ_DIR_READ) {
 		if (!(flags & IOPORT_F_IN))
 			return -1;
 	} else {

@@ -73,11 +73,11 @@
 #define GUEST_NIO_PORT		0x488	/* guest upcalls via i/o port */
 
 /* Values returned for reads on invalid I/O requests. */
-#define VHM_REQ_PIO_INVAL	(~0U)
-#define VHM_REQ_MMIO_INVAL	(~0UL)
+#define IOREQ_PIO_INVAL		(~0U)
+#define IOREQ_MMIO_INVAL	(~0UL)
 
 typedef void (*vmexit_handler_t)(struct vmctx *,
-		struct vhm_request *, int *vcpu);
+		struct acrn_io_request *, int *vcpu);
 
 char *vmname;
@@ -114,10 +114,10 @@ static cpuset_t cpumask;
 
 static void vm_loop(struct vmctx *ctx);
 
-static char vhm_request_page[4096] __aligned(4096);
-static struct vhm_request *vhm_req_buf =
-	(struct vhm_request *)&vhm_request_page;
+static char io_request_page[4096] __aligned(4096);
+static struct acrn_io_request *ioreq_buf =
+	(struct acrn_io_request *)&io_request_page;
 
 struct dmstats {
 	uint64_t vmexit_bogus;
@@ -302,16 +302,16 @@ notify_vmloop_thread(void)
 #endif
 
 static void
-vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_inout(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
 	int error;
 	int bytes, port, in;
 
-	port = vhm_req->reqs.pio.address;
-	bytes = vhm_req->reqs.pio.size;
-	in = (vhm_req->reqs.pio.direction == REQUEST_READ);
+	port = io_req->reqs.pio_request.address;
+	bytes = io_req->reqs.pio_request.size;
+	in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);
 
-	error = emulate_inout(ctx, pvcpu, &vhm_req->reqs.pio);
+	error = emulate_inout(ctx, pvcpu, &io_req->reqs.pio_request);
 	if (error) {
 		pr_err("Unhandled %s%c 0x%04x\n",
 		       in ? "in" : "out",
@@ -319,56 +319,56 @@ vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 		       port);
 		if (in) {
-			vhm_req->reqs.pio.value = VHM_REQ_PIO_INVAL;
+			io_req->reqs.pio_request.value = IOREQ_PIO_INVAL;
 		}
 	}
 }
 
 static void
-vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_mmio_emul(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
 	int err;
 
 	stats.vmexit_mmio_emul++;
-	err = emulate_mem(ctx, &vhm_req->reqs.mmio);
+	err = emulate_mem(ctx, &io_req->reqs.mmio_request);
 
 	if (err) {
 		if (err == -ESRCH)
 			pr_err("Unhandled memory access to 0x%lx\n",
-				vhm_req->reqs.mmio.address);
+				io_req->reqs.mmio_request.address);
 
 		pr_err("Failed to emulate instruction [");
 		pr_err("mmio address 0x%lx, size %ld",
-				vhm_req->reqs.mmio.address,
-				vhm_req->reqs.mmio.size);
+				io_req->reqs.mmio_request.address,
+				io_req->reqs.mmio_request.size);
 
-		if (vhm_req->reqs.mmio.direction == REQUEST_READ) {
-			vhm_req->reqs.mmio.value = VHM_REQ_MMIO_INVAL;
+		if (io_req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
+			io_req->reqs.mmio_request.value = IOREQ_MMIO_INVAL;
 		}
 	}
 }
 
 static void
-vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_pci_emul(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
-	int err, in = (vhm_req->reqs.pci.direction == REQUEST_READ);
+	int err, in = (io_req->reqs.pci_request.direction == ACRN_IOREQ_DIR_READ);
 
 	err = emulate_pci_cfgrw(ctx, *pvcpu, in,
-			vhm_req->reqs.pci.bus,
-			vhm_req->reqs.pci.dev,
-			vhm_req->reqs.pci.func,
-			vhm_req->reqs.pci.reg,
-			vhm_req->reqs.pci.size,
-			&vhm_req->reqs.pci.value);
+			io_req->reqs.pci_request.bus,
+			io_req->reqs.pci_request.dev,
+			io_req->reqs.pci_request.func,
+			io_req->reqs.pci_request.reg,
+			io_req->reqs.pci_request.size,
+			&io_req->reqs.pci_request.value);
 	if (err) {
 		pr_err("Unhandled pci cfg rw at %x:%x.%x reg 0x%x\n",
-			vhm_req->reqs.pci.bus,
-			vhm_req->reqs.pci.dev,
-			vhm_req->reqs.pci.func,
-			vhm_req->reqs.pci.reg);
+			io_req->reqs.pci_request.bus,
+			io_req->reqs.pci_request.dev,
+			io_req->reqs.pci_request.func,
+			io_req->reqs.pci_request.reg);
 		if (in) {
-			vhm_req->reqs.pio.value = VHM_REQ_PIO_INVAL;
+			io_req->reqs.pio_request.value = IOREQ_PIO_INVAL;
 		}
 	}
 }
@@ -397,24 +397,24 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
 };
 
 static void
-handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
+handle_vmexit(struct vmctx *ctx, struct acrn_io_request *io_req, int vcpu)
 {
 	enum vm_exitcode exitcode;
 
-	exitcode = vhm_req->type;
+	exitcode = io_req->type;
 	if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
 		pr_err("handle vmexit: unexpected exitcode 0x%x\n",
 				exitcode);
 		exit(1);
 	}
 
-	(*handler[exitcode])(ctx, vhm_req, &vcpu);
+	(*handler[exitcode])(ctx, io_req, &vcpu);
 
-	/* We cannot notify the VHM/hypervisor on the request completion at this
+	/* We cannot notify the HSM/hypervisor on the request completion at this
 	 * point if the UOS is in suspend or system reset mode, as the VM is
 	 * still not paused and a notification can kick off the vcpu to run
 	 * again. Postpone the notification till vm_system_reset() or
-	 * vm_suspend_resume() for resetting the ioreq states in the VHM and
+	 * vm_suspend_resume() for resetting the ioreq states in the HSM and
 	 * hypervisor.
 	 */
 	if ((VM_SUSPEND_SYSTEM_RESET == vm_get_suspend_mode()) ||
@@ -633,10 +633,10 @@ vm_system_reset(struct vmctx *ctx)
 	 * request which is the APIC PM CR write. VM reset will reset it
 	 *
 	 * When handling emergency mode triggered by one vcpu without
-	 * offlining any other vcpus, there can be multiple VHM requests
+	 * offlining any other vcpus, there can be multiple IO requests
 	 * with various states. We should be careful on potential races
 	 * when resetting especially in SMP SOS. vm_clear_ioreq can be used
-	 * to clear all ioreq status in VHM after VM pause, then let VM
+	 * to clear all ioreq status in HSM after VM pause, then let VM
 	 * reset in hypervisor reset all ioreqs.
 	 */
 	vm_clear_ioreq(ctx);
@@ -698,17 +698,17 @@ vm_loop(struct vmctx *ctx)
 	while (1) {
 		int vcpu_id;
-		struct vhm_request *vhm_req;
+		struct acrn_io_request *io_req;
 
 		error = vm_attach_ioreq_client(ctx);
 		if (error)
 			break;
 
 		for (vcpu_id = 0; vcpu_id < guest_ncpus; vcpu_id++) {
-			vhm_req = &vhm_req_buf[vcpu_id];
-			if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING)
-				&& (vhm_req->client == ctx->ioreq_client))
-				handle_vmexit(ctx, vhm_req, vcpu_id);
+			io_req = &ioreq_buf[vcpu_id];
+			if ((atomic_load(&io_req->processed) == ACRN_IOREQ_STATE_PROCESSING)
+				&& !io_req->kernel_handled)
+				handle_vmexit(ctx, io_req, vcpu_id);
 		}
 
 		if (VM_SUSPEND_FULL_RESET == vm_get_suspend_mode() ||
@@ -1022,7 +1022,7 @@ main(int argc, char *argv[])
 	for (;;) {
 		pr_notice("vm_create: %s\n", vmname);
-		ctx = vm_create(vmname, (unsigned long)vhm_req_buf, &guest_ncpus);
+		ctx = vm_create(vmname, (unsigned long)ioreq_buf, &guest_ncpus);
 		if (!ctx) {
 			pr_err("vm_create failed");
 			goto create_fail;

@@ -150,7 +150,7 @@ mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
 }
 
 int
-emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
+emulate_mem(struct vmctx *ctx, struct acrn_mmio_request *mmio_req)
 {
 	uint64_t paddr = mmio_req->address;
 	int size = mmio_req->size;
@@ -179,7 +179,7 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
 	if (entry == NULL)
 		return -EINVAL;
 
-	if (mmio_req->direction == REQUEST_READ)
+	if (mmio_req->direction == ACRN_IOREQ_DIR_READ)
 		err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
 			       size, &entry->mr_param);
 	else

@@ -259,13 +259,13 @@ err:
 int
 vm_create_ioreq_client(struct vmctx *ctx)
 {
-	return ioctl(ctx->fd, IC_CREATE_IOREQ_CLIENT, 0);
+	return ioctl(ctx->fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0);
 }
 
 int
 vm_destroy_ioreq_client(struct vmctx *ctx)
 {
-	return ioctl(ctx->fd, IC_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
+	return ioctl(ctx->fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
 }
 
 int
@@ -273,7 +273,7 @@ vm_attach_ioreq_client(struct vmctx *ctx)
 {
 	int error;
 
-	error = ioctl(ctx->fd, IC_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);
+	error = ioctl(ctx->fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);
 
 	if (error) {
 		pr_err("attach ioreq client return %d "
@@ -289,13 +289,13 @@ int
 vm_notify_request_done(struct vmctx *ctx, int vcpu)
 {
 	int error;
-	struct ioreq_notify notify;
+	struct acrn_ioreq_notify notify;
 
 	bzero(&notify, sizeof(notify));
-	notify.client_id = ctx->ioreq_client;
+	notify.vmid = ctx->vmid;
 	notify.vcpu = vcpu;
 
-	error = ioctl(ctx->fd, IC_NOTIFY_REQUEST_FINISH, &notify);
+	error = ioctl(ctx->fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);
 	if (error) {
 		pr_err("failed: notify request finish\n");
@@ -488,7 +488,7 @@ vm_reset(struct vmctx *ctx)
 void
 vm_clear_ioreq(struct vmctx *ctx)
 {
-	ioctl(ctx->fd, IC_CLEAR_VM_IOREQ, NULL);
+	ioctl(ctx->fd, ACRN_IOCTL_CLEAR_VM_IOREQ, NULL);
 }
 
 static enum vm_suspend_how suspend_mode = VM_SUSPEND_NONE;

@@ -32,7 +32,6 @@
 #include "types.h"
 #include "acrn_common.h"
 
 struct vmctx;
-struct vhm_request;
 
 /*
  * inout emulation handlers return 0 on success and -1 on failure.
@@ -71,7 +70,7 @@ struct inout_port {
 	DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
 
 void init_inout(void);
-int emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *req);
+int emulate_inout(struct vmctx *ctx, int *pvcpu, struct acrn_pio_request *req);
 int register_inout(struct inout_port *iop);
 int unregister_inout(struct inout_port *iop);

@@ -50,7 +50,7 @@ struct mem_range {
 #define MEM_F_RW	(MEM_F_READ | MEM_F_WRITE)
 #define MEM_F_IMMUTABLE	0x4	/* mem_range cannot be unregistered */
 
-int emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req);
+int emulate_mem(struct vmctx *ctx, struct acrn_mmio_request *mmio_req);
 int register_mem(struct mem_range *memp);
 int register_mem_fallback(struct mem_range *memp);
 int unregister_mem(struct mem_range *memp);

@@ -94,13 +94,16 @@
 	_IOW(ACRN_IOCTL_TYPE, 0x25, __u64)
 
 /* DM ioreq management */
-#define IC_ID_IOREQ_BASE		0x30UL
-#define IC_SET_IOREQ_BUFFER		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00)
-#define IC_NOTIFY_REQUEST_FINISH	_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01)
-#define IC_CREATE_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02)
-#define IC_ATTACH_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03)
-#define IC_DESTROY_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04)
-#define IC_CLEAR_VM_IOREQ		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x05)
+#define ACRN_IOCTL_NOTIFY_REQUEST_FINISH \
+	_IOW(ACRN_IOCTL_TYPE, 0x31, struct acrn_ioreq_notify)
+#define ACRN_IOCTL_CREATE_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x32)
+#define ACRN_IOCTL_ATTACH_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x33)
+#define ACRN_IOCTL_DESTROY_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x34)
+#define ACRN_IOCTL_CLEAR_VM_IOREQ \
+	_IO(ACRN_IOCTL_TYPE, 0x35)
 
 /* Guest memory management */
 #define IC_ID_MEM_BASE			0x40UL
@@ -286,11 +289,12 @@ struct ic_ptdev_irq {
 /**
  * @brief data strcture to notify hypervisor ioreq is handled
  */
-struct ioreq_notify {
-	/** client id to identify ioreq client */
-	int32_t client_id;
+struct acrn_ioreq_notify {
+	/** VM id to identify ioreq client */
+	__u16 vmid;
+	__u16 reserved;
 	/** identify the ioreq submitter */
-	uint32_t vcpu;
+	__u32 vcpu;
 };
 
 #define ACRN_PLATFORM_LAPIC_IDS_MAX 64

@@ -600,7 +600,7 @@ static void vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
  */
 static inline void vie_mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
 {
-	*rval = vcpu->req.reqs.mmio.value;
+	*rval = vcpu->req.reqs.mmio_request.value;
 }
 
@@ -608,7 +608,7 @@ static inline void vie_mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
  */
 static inline void vie_mmio_write(struct acrn_vcpu *vcpu, uint64_t wval)
 {
-	vcpu->req.reqs.mmio.value = wval;
+	vcpu->req.reqs.mmio_request.value = wval;
 }
 
 static void vie_calc_bytereg(const struct instr_emul_vie *vie,
@@ -1087,7 +1087,7 @@ static int32_t emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie
 	/* update the Memory Operand byte size if necessary */
 	opsize = ((vie->op.op_flags & VIE_OP_F_BYTE_OP) != 0U) ? 1U : vie->opsize;
 
-	is_mmio_write = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE);
+	is_mmio_write = (vcpu->req.reqs.mmio_request.direction == ACRN_IOREQ_DIR_WRITE);
 
 	/*
 	 * XXX although the MOVS instruction is only supposed to be used with
@@ -2325,7 +2325,7 @@ static int32_t instr_check_gva(struct acrn_vcpu *vcpu, enum vm_cpu_mode cpu_mode
 		}
 		ret = -EFAULT;
 	} else {
-		err_code = (vcpu->req.reqs.mmio.direction == REQUEST_WRITE) ? PAGE_FAULT_WR_FLAG : 0U;
+		err_code = (vcpu->req.reqs.mmio_request.direction == ACRN_IOREQ_DIR_WRITE) ? PAGE_FAULT_WR_FLAG : 0U;
 
 		ret = gva2gpa(vcpu, gva, &gpa, &err_code);
 		if (ret < 0) {

@@ -143,7 +143,7 @@ static inline uint8_t get_slp_typx(uint32_t pm1_cnt)
 static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
 {
-	struct pio_request *pio_req = &vcpu->req.reqs.pio;
+	struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
 
 	pio_req->value = pio_read(addr, width);
@@ -314,7 +314,7 @@ static void register_rt_vm_pm1a_ctl_handler(struct acrn_vm *vm)
  */
 static bool prelaunched_vm_sleep_io_read(struct acrn_vcpu *vcpu, __unused uint16_t addr, __unused size_t width)
 {
-	vcpu->req.reqs.pio.value = 0U;
+	vcpu->req.reqs.pio_request.value = 0U;
 
 	return true;
 }

@@ -2383,7 +2383,7 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t offset;
 	uint64_t qual, access_type;
 	struct acrn_vlapic *vlapic;
-	struct mmio_request *mmio;
+	struct acrn_mmio_request *mmio;
 
 	qual = vcpu->arch.exit_qualification;
 	access_type = apic_access_type(qual);
@@ -2407,7 +2407,7 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 			(decode_instruction(vcpu) >= 0)) {
 		vlapic = vcpu_vlapic(vcpu);
 		offset = (uint32_t)apic_access_offset(qual);
-		mmio = &vcpu->req.reqs.mmio;
+		mmio = &vcpu->req.reqs.mmio_request;
 		if (access_type == TYPE_LINEAR_APIC_INST_WRITE) {
 			err = emulate_instruction(vcpu);
 			if (err == 0) {

@@ -22,11 +22,11 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
 		struct io_request *io_req = &vcpu->req;
 
 		/* Device model emulates PM1A for post-launched VMs */
-		io_req->io_type = REQ_PORTIO;
-		io_req->reqs.pio.direction = REQUEST_WRITE;
-		io_req->reqs.pio.address = VIRTUAL_PM1A_CNT_ADDR;
-		io_req->reqs.pio.size = 2UL;
-		io_req->reqs.pio.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));
+		io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
+		io_req->reqs.pio_request.direction = ACRN_IOREQ_DIR_WRITE;
+		io_req->reqs.pio_request.address = VIRTUAL_PM1A_CNT_ADDR;
+		io_req->reqs.pio_request.size = 2UL;
+		io_req->reqs.pio_request.value = (VIRTUAL_PM1A_SLP_EN | (5U << 10U));
 
 		/* Inject pm1a S5 request to SOS to shut down the guest */
 		(void)emulate_io(vcpu, io_req);
@@ -76,7 +76,7 @@ static bool handle_reset_reg_read(struct acrn_vcpu *vcpu, __unused uint16_t addr
 		 * - reset control register 0xcf9: hide this from guests for now.
 		 * - FADT reset register: the read behavior is not defined in spec, keep it simple to return all '1'.
 		 */
-		vcpu->req.reqs.pio.value = ~0U;
+		vcpu->req.reqs.pio_request.value = ~0U;
 	}
 
 	return ret;
@@ -139,10 +139,10 @@ static bool handle_kb_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
 {
 	if (is_sos_vm(vcpu->vm) && (bytes == 1U)) {
 		/* In case i8042 is defined as ACPI PNP device in BIOS, HV need expose physical 0x64 port. */
-		vcpu->req.reqs.pio.value = pio_read8(addr);
+		vcpu->req.reqs.pio_request.value = pio_read8(addr);
 	} else {
 		/* ACRN will not expose kbd controller to the guest in this case. */
-		vcpu->req.reqs.pio.value = ~0U;
+		vcpu->req.reqs.pio_request.value = ~0U;
 	}
 	return true;
 }

@@ -18,7 +18,7 @@
 #include <trace.h>
 #include <logmsg.h>
 
-void arch_fire_vhm_interrupt(void)
+void arch_fire_hsm_interrupt(void)
 {
 	/*
 	 * use vLAPIC to inject vector to SOS vcpu 0 if vlapic is enabled
@@ -30,25 +30,25 @@ void arch_fire_vhm_interrupt(void)
 	sos_vm = get_sos_vm();
 	vcpu = vcpu_from_vid(sos_vm, BSP_CPU_ID);
 
-	vlapic_set_intr(vcpu, get_vhm_notification_vector(), LAPIC_TRIG_EDGE);
+	vlapic_set_intr(vcpu, get_hsm_notification_vector(), LAPIC_TRIG_EDGE);
 }
 
 /**
  * @brief General complete-work for port I/O emulation
  *
- * @pre io_req->io_type == REQ_PORTIO
+ * @pre io_req->io_type == ACRN_IOREQ_TYPE_PORTIO
  *
  * @remark This function must be called when \p io_req is completed, after
- * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * either a previous call to emulate_io() returning 0 or the corresponding IO
  * request having transferred to the COMPLETE state.
  */
 void
 emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
-	const struct pio_request *pio_req = &io_req->reqs.pio;
+	const struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - (8UL * pio_req->size));
 
-	if (pio_req->direction == REQUEST_READ) {
+	if (pio_req->direction == ACRN_IOREQ_DIR_READ) {
 		uint64_t value = (uint64_t)pio_req->value;
 		uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
@@ -70,19 +70,19 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t mask;
 	int32_t cur_context_idx = vcpu->arch.cur_context;
 	struct io_request *io_req = &vcpu->req;
-	struct pio_request *pio_req = &io_req->reqs.pio;
+	struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
 
 	exit_qual = vcpu->arch.exit_qualification;
 
-	io_req->io_type = REQ_PORTIO;
+	io_req->io_type = ACRN_IOREQ_TYPE_PORTIO;
 	pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
 	pio_req->address = vm_exit_io_instruction_port_number(exit_qual);
 	if (vm_exit_io_instruction_access_direction(exit_qual) == 0UL) {
 		mask = 0xFFFFFFFFU >> (32U - (8U * pio_req->size));
-		pio_req->direction = REQUEST_WRITE;
+		pio_req->direction = ACRN_IOREQ_DIR_WRITE;
 		pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX) & mask;
 	} else {
-		pio_req->direction = REQUEST_READ;
+		pio_req->direction = ACRN_IOREQ_DIR_READ;
 	}
 
 	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION,
@@ -102,7 +102,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint64_t exit_qual;
 	uint64_t gpa;
 	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;
 
 	/* Handle page fault from guest */
 	exit_qual = vcpu->arch.exit_qualification;
@@ -125,21 +125,21 @@
 			status = 0;
 		} else {
-			io_req->io_type = REQ_MMIO;
+			io_req->io_type = ACRN_IOREQ_TYPE_MMIO;
 
 			/* Specify if read or write operation */
 			if ((exit_qual & 0x2UL) != 0UL) {
 				/* Write operation */
-				mmio_req->direction = REQUEST_WRITE;
+				mmio_req->direction = ACRN_IOREQ_DIR_WRITE;
 				mmio_req->value = 0UL;
 
 				/* XXX: write access while EPT perm RX -> WP */
 				if ((exit_qual & 0x38UL) == 0x28UL) {
-					io_req->io_type = REQ_WP;
+					io_req->io_type = ACRN_IOREQ_TYPE_WP;
 				}
 			} else {
 				/* Read operation */
-				mmio_req->direction = REQUEST_READ;
+				mmio_req->direction = ACRN_IOREQ_DIR_READ;
 
 				/* TODO: Need to determine how sign extension is determined for
 				 * reads
@@ -160,7 +160,7 @@
 			 */
 
 			/* Determine value being written. */
-			if (mmio_req->direction == REQUEST_WRITE) {
+			if (mmio_req->direction == ACRN_IOREQ_DIR_WRITE) {
 				status = emulate_instruction(vcpu);
 				if (status != 0) {
 					ret = -EFAULT;

@@ -527,8 +527,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, __un
  *
  * @param vcpu Pointer to vCPU that initiates the hypercall
  * @param target_vm Pointer to target VM data structure
- * @param param2 guest physical address. This gpa points to
- *               struct acrn_set_ioreq_buffer
+ * @param param2 guest physical address. This gpa points to buffer address
  *
  * @pre is_sos_vm(vcpu->vm)
  * @return 0 on success, non-zero on error.
@@ -542,21 +541,21 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm
 	int32_t ret = -1;
 
 	if (is_created_vm(target_vm)) {
-		struct acrn_set_ioreq_buffer iobuf;
+		uint64_t iobuf;
 
 		if (copy_from_gpa(vm, &iobuf, param2, sizeof(iobuf)) == 0) {
 			dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
-				target_vm->vm_id, iobuf.req_buf);
+				target_vm->vm_id, iobuf);
 
-			hpa = gpa2hpa(vm, iobuf.req_buf);
+			hpa = gpa2hpa(vm, iobuf);
 			if (hpa == INVALID_HPA) {
 				pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
-					__func__, vm->vm_id, iobuf.req_buf);
+					__func__, vm->vm_id, iobuf);
 				target_vm->sw.io_shared_page = NULL;
 			} else {
 				target_vm->sw.io_shared_page = hpa2hva(hpa);
-				for (i = 0U; i < VHM_REQUEST_MAX; i++) {
-					set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+				for (i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
+					set_io_req_state(target_vm, i, ACRN_IOREQ_STATE_FREE);
 				}
 				ret = 0;
 			}
@@ -1232,7 +1231,7 @@ int32_t hcall_set_callback_vector(__unused struct acrn_vcpu *vcpu, __unused stru
 		pr_err("%s: Invalid passed vector\n", __func__);
 		ret = -EINVAL;
 	} else {
-		set_vhm_notification_vector((uint32_t)param1);
+		set_hsm_notification_vector((uint32_t)param1);
 		ret = 0;
 	}

@@ -10,34 +10,34 @@
 #define DBG_LEVEL_IOREQ	6U
 
-static uint32_t acrn_vhm_notification_vector = HYPERVISOR_CALLBACK_VHM_VECTOR;
+static uint32_t acrn_hsm_notification_vector = HYPERVISOR_CALLBACK_HSM_VECTOR;
 
 #define MMIO_DEFAULT_VALUE_SIZE_1	(0xFFUL)
 #define MMIO_DEFAULT_VALUE_SIZE_2	(0xFFFFUL)
 #define MMIO_DEFAULT_VALUE_SIZE_4	(0xFFFFFFFFUL)
 #define MMIO_DEFAULT_VALUE_SIZE_8	(0xFFFFFFFFFFFFFFFFUL)
 
 #if defined(HV_DEBUG)
-__unused static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
+__unused static void acrn_print_request(uint16_t vcpu_id, const struct acrn_io_request *req)
 {
 	switch (req->type) {
-	case REQ_MMIO:
+	case ACRN_IOREQ_TYPE_MMIO:
 		dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=MMIO]", vcpu_id);
 		dev_dbg(DBG_LEVEL_IOREQ,
 			"gpa=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
-			req->reqs.mmio.address,
-			req->reqs.mmio.direction,
-			req->reqs.mmio.size,
-			req->reqs.mmio.value,
+			req->reqs.mmio_request.address,
+			req->reqs.mmio_request.direction,
+			req->reqs.mmio_request.size,
+			req->reqs.mmio_request.value,
 			req->processed);
 		break;
-	case REQ_PORTIO:
+	case ACRN_IOREQ_TYPE_PORTIO:
 		dev_dbg(DBG_LEVEL_IOREQ, "[vcpu_id=%hu type=PORTIO]", vcpu_id);
 		dev_dbg(DBG_LEVEL_IOREQ,
 			"IO=0x%lx, R/W=%d, size=%ld value=0x%lx processed=%lx",
-			req->reqs.pio.address,
-			req->reqs.pio.direction,
-			req->reqs.pio.size,
-			req->reqs.pio.value,
+			req->reqs.pio_request.address,
+			req->reqs.pio_request.direction,
+			req->reqs.pio_request.size,
+			req->reqs.pio_request.value,
 			req->processed);
 		break;
 	default:
@@ -59,14 +59,14 @@ void reset_vm_ioreqs(struct acrn_vm *vm)
 {
 	uint16_t i;
 
-	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
-		set_vhm_req_state(vm, i, REQ_STATE_FREE);
+	for (i = 0U; i < ACRN_IO_REQUEST_MAX; i++) {
+		set_io_req_state(vm, i, ACRN_IOREQ_STATE_FREE);
 	}
 }
 
 static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
 {
-	return (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE);
+	return (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE);
 }
 
@@ -79,42 +79,42 @@ static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
  */
 int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
-	union vhm_request_buffer *req_buf = NULL;
-	struct vhm_request *vhm_req;
+	struct acrn_io_request_buffer *req_buf = NULL;
+	struct acrn_io_request *acrn_io_req;
 	bool is_polling = false;
 	int32_t ret = 0;
 	uint16_t cur;
 
 	if ((vcpu->vm->sw.io_shared_page != NULL)
-		&& (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_FREE)) {
+		&& (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_FREE)) {
 
-		req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+		req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);
 		cur = vcpu->vcpu_id;
 
 		stac();
-		vhm_req = &req_buf->req_queue[cur];
+		acrn_io_req = &req_buf->req_slot[cur];
 
-		/* ACRN insert request to VHM and inject upcall */
-		vhm_req->type = io_req->io_type;
-		(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
-			&io_req->reqs, sizeof(union vhm_io_request));
+		/* ACRN insert request to HSM and inject upcall */
+		acrn_io_req->type = io_req->io_type;
+		(void)memcpy_s(&acrn_io_req->reqs, sizeof(acrn_io_req->reqs),
+			&io_req->reqs, sizeof(acrn_io_req->reqs));
 		if (vcpu->vm->sw.is_polling_ioreq) {
-			vhm_req->completion_polling = 1U;
+			acrn_io_req->completion_polling = 1U;
 			is_polling = true;
 		}
 		clac();
 
-		/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
+		/* Before updating the acrn_io_req state, enforce all fill acrn_io_req operations done */
 		cpu_write_memory_barrier();
 
 		/* Must clear the signal before we mark req as pending
-		 * Once we mark it pending, VHM may process req and signal us
+		 * Once we mark it pending, HSM may process req and signal us
 		 * before we perform upcall.
-		 * because VHM can work in pulling mode without wait for upcall
+		 * because HSM can work in pulling mode without wait for upcall
 		 */
-		set_vhm_req_state(vcpu->vm, vcpu->vcpu_id, REQ_STATE_PENDING);
+		set_io_req_state(vcpu->vm, vcpu->vcpu_id, ACRN_IOREQ_STATE_PENDING);
 
-		/* signal VHM */
-		arch_fire_vhm_interrupt();
+		/* signal HSM */
+		arch_fire_hsm_interrupt();
 
 		/* Polling completion of the request in polling mode */
 		if (is_polling) {
@@ -138,53 +138,53 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
 	return ret;
 }
 
-uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id)
+uint32_t get_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id)
 {
 	uint32_t state;
-	union vhm_request_buffer *req_buf = NULL;
-	struct vhm_request *vhm_req;
+	struct acrn_io_request_buffer *req_buf = NULL;
+	struct acrn_io_request *acrn_io_req;
 
-	req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
+	req_buf = (struct acrn_io_request_buffer *)vm->sw.io_shared_page;
 	if (req_buf == NULL) {
 		state = 0xffffffffU;
 	} else {
 		stac();
-		vhm_req = &req_buf->req_queue[vhm_req_id];
-		state = vhm_req->processed;
+		acrn_io_req = &req_buf->req_slot[vcpu_id];
+		state = acrn_io_req->processed;
 		clac();
 	}
 
 	return state;
 }
 
-void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state)
+void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state)
 {
-	union vhm_request_buffer *req_buf = NULL;
-	struct vhm_request *vhm_req;
+	struct acrn_io_request_buffer *req_buf = NULL;
+	struct acrn_io_request *acrn_io_req;
 
-	req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
+	req_buf = (struct acrn_io_request_buffer *)vm->sw.io_shared_page;
 	if (req_buf != NULL) {
 		stac();
-		vhm_req = &req_buf->req_queue[vhm_req_id];
+		acrn_io_req = &req_buf->req_slot[vcpu_id];
 		/*
-		 * HV will only set processed to REQ_STATE_PENDING or REQ_STATE_FREE.
+		 * HV will only set processed to ACRN_IOREQ_STATE_PENDING or ACRN_IOREQ_STATE_FREE.
 		 * we don't need to sfence here is that even if the SOS/DM sees the previous state,
 		 * the only side effect is that it will defer the processing of the new IOReq.
 		 * It won't lead wrong processing.
 		 */
-		vhm_req->processed = state;
+		acrn_io_req->processed = state;
		clac();
 	}
 }
 
-void set_vhm_notification_vector(uint32_t vector)
+void set_hsm_notification_vector(uint32_t vector)
 {
-	acrn_vhm_notification_vector = vector;
+	acrn_hsm_notification_vector = vector;
 }
 
-uint32_t get_vhm_notification_vector(void)
+uint32_t get_hsm_notification_vector(void)
 {
-	return acrn_vhm_notification_vector;
+	return acrn_hsm_notification_vector;
 }
 
@@ -193,17 +193,17 @@ uint32_t get_vhm_notification_vector(void)
  * @param vcpu The virtual CPU that triggers the MMIO access
  * @param io_req The I/O request holding the details of the MMIO access
  *
- * @pre io_req->io_type == REQ_MMIO
+ * @pre io_req->io_type == ACRN_IOREQ_TYPE_MMIO
  *
 * @remark This function must be called when \p io_req is completed, after
- * either a previous call to emulate_io() returning 0 or the corresponding VHM
+ * either a previous call to emulate_io() returning 0 or the corresponding HSM
 * request transferring to the COMPLETE state.
 */
 static void emulate_mmio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
-	const struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	const struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;
 
-	if (mmio_req->direction == REQUEST_READ) {
+	if (mmio_req->direction == ACRN_IOREQ_DIR_READ) {
 		/* Emulate instruction and update vcpu register set */
 		(void)emulate_instruction(vcpu);
 	}
@@ -211,21 +211,21 @@ static void emulate_mmio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 
 static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
-	union vhm_request_buffer *req_buf = NULL;
-	struct vhm_request *vhm_req;
+	struct acrn_io_request_buffer *req_buf = NULL;
+	struct acrn_io_request *acrn_io_req;
 
-	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+	req_buf = (struct acrn_io_request_buffer *)(vcpu->vm->sw.io_shared_page);
 	stac();
-	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
+	acrn_io_req = &req_buf->req_slot[vcpu->vcpu_id];
 	if (io_req != NULL) {
 		switch (vcpu->req.io_type) {
-		case REQ_PORTIO:
-			io_req->reqs.pio.value = vhm_req->reqs.pio.value;
+		case ACRN_IOREQ_TYPE_PORTIO:
+			io_req->reqs.pio_request.value = acrn_io_req->reqs.pio_request.value;
 			break;
 
-		case REQ_MMIO:
-			io_req->reqs.mmio.value = vhm_req->reqs.mmio.value;
+		case ACRN_IOREQ_TYPE_MMIO:
+			io_req->reqs.mmio_request.value = acrn_io_req->reqs.mmio_request.value;
 			break;
 
 		default:
@@ -235,19 +235,19 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	}
 
 	/*
-	 * Only HV will check whether processed is REQ_STATE_FREE on per-vCPU before inject a ioreq.
-	 * Only HV will set processed to REQ_STATE_FREE when ioreq is done.
+	 * Only HV will check whether processed is ACRN_IOREQ_STATE_FREE on per-vCPU before inject a ioreq.
+	 * Only HV will set processed to ACRN_IOREQ_STATE_FREE when ioreq is done.
 	 */
-	vhm_req->processed = REQ_STATE_FREE;
+	acrn_io_req->processed = ACRN_IOREQ_STATE_FREE;
 	clac();
 }
 
 /**
- * @brief Complete-work of VHM requests for port I/O emulation
+ * @brief Complete-work of HSM requests for port I/O emulation
  *
- * @pre vcpu->req.io_type == REQ_PORTIO
+ * @pre vcpu->req.io_type == ACRN_IOREQ_TYPE_PORTIO
  *
- * @remark This function must be called after the VHM request corresponding to
+ * @remark This function must be called after the HSM request corresponding to
  * \p vcpu being transferred to the COMPLETE state.
  */
 static void dm_emulate_pio_complete(struct acrn_vcpu *vcpu)
@@ -260,13 +260,13 @@ static void dm_emulate_pio_complete(struct acrn_vcpu *vcpu)
 }
 
 /**
- * @brief Complete-work of VHM requests for MMIO emulation
+ * @brief Complete-work of HSM requests for MMIO emulation
  *
  * @param vcpu The virtual CPU that triggers the MMIO access
  *
- * @pre vcpu->req.io_type == REQ_MMIO
+ * @pre vcpu->req.io_type == ACRN_IOREQ_TYPE_MMIO
  *
- * @remark This function must be called after the VHM request corresponding to
+ * @remark This function must be called after the HSM request corresponding to
  * \p vcpu being transferred to the COMPLETE state.
  */
 static void dm_emulate_mmio_complete(struct acrn_vcpu *vcpu)
@@ -279,13 +279,13 @@ static void dm_emulate_mmio_complete(struct acrn_vcpu *vcpu)
 }
 
 /**
- * @brief General complete-work for all kinds of VHM requests for I/O emulation
+ * @brief General complete-work for all kinds of HSM requests for I/O emulation
  *
  * @param vcpu The virtual CPU that triggers the MMIO access
  */
 static void dm_emulate_io_complete(struct acrn_vcpu *vcpu)
 {
-	if (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE) {
+	if (get_io_req_state(vcpu->vm, vcpu->vcpu_id) == ACRN_IOREQ_STATE_COMPLETE) {
 		/*
 		 * If vcpu is in Zombie state and will be destroyed soon. Just
 		 * mark ioreq done and don't resume vcpu.
@@ -294,26 +294,27 @@ static void dm_emulate_io_complete(struct acrn_vcpu *vcpu)
 		complete_ioreq(vcpu, NULL);
 	} else {
 		switch (vcpu->req.io_type) {
-		case REQ_MMIO:
+		case ACRN_IOREQ_TYPE_MMIO:
 			dm_emulate_mmio_complete(vcpu);
 			break;
 
-		case REQ_PORTIO:
-		case REQ_PCICFG:
+		case ACRN_IOREQ_TYPE_PORTIO:
+		case ACRN_IOREQ_TYPE_PCICFG:
 			/*
-			 * REQ_PORTIO on 0xcf8 & 0xcfc may switch to REQ_PCICFG in some
-			 * cases. It works to apply the post-work for REQ_PORTIO on
-			 * REQ_PCICFG because the format of the first 28 bytes of
-			 * REQ_PORTIO & REQ_PCICFG requests are exactly the same and
-			 * post-work is mainly interested in the read value.
+			 * ACRN_IOREQ_TYPE_PORTIO on 0xcf8 & 0xcfc may switch to
+			 * ACRN_IOREQ_TYPE_PCICFG in some cases. It works to apply the post-work
+			 * for ACRN_IOREQ_TYPE_PORTIO on ACRN_IOREQ_TYPE_PCICFG because the
+			 * format of the first 28 bytes of ACRN_IOREQ_TYPE_PORTIO &
+			 * ACRN_IOREQ_TYPE_PCICFG requests are exactly the same and post-work
+			 * is mainly interested in the read value.
			 */
 			dm_emulate_pio_complete(vcpu);
 			break;
 
 		default:
 			/*
-			 * REQ_WP can only be triggered on writes which do not need
-			 * post-work. Just mark the ioreq done.
+			 * ACRN_IOREQ_TYPE_WP can only be triggered on writes which do
+			 * not need post-work. Just mark the ioreq done.
 			 */
 			complete_ioreq(vcpu, NULL);
 			break;
@@ -331,7 +332,7 @@ static void dm_emulate_io_complete(struct acrn_vcpu *vcpu)
 static bool pio_default_read(struct acrn_vcpu *vcpu,
 	__unused uint16_t addr, size_t width)
 {
-	struct pio_request *pio_req = &vcpu->req.reqs.pio;
+	struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
 
 	pio_req->value = (uint32_t)((1UL << (width * 8U)) - 1UL);
@@ -356,9 +357,9 @@ static bool pio_default_write(__unused struct acrn_vcpu *vcpu, __unused uint16_t
 static int32_t mmio_default_access_handler(struct io_request *io_req,
 	__unused void *handler_private_data)
 {
-	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 
-	if (mmio->direction == REQUEST_READ) {
+	if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 		switch (mmio->size) {
 		case 1U:
 			mmio->value = MMIO_DEFAULT_VALUE_SIZE_1;
@@ -385,7 +386,7 @@ static int32_t mmio_default_access_handler(struct io_request *io_req,
 * Try handling the given request by any port I/O handler registered in the
 * hypervisor.
 *
- * @pre io_req->io_type == REQ_PORTIO
+ * @pre io_req->io_type == ACRN_IOREQ_TYPE_PORTIO
 *
 * @retval 0 Successfully emulated by registered handlers.
 * @retval -ENODEV No proper handler found.
@@ -398,7 +399,7 @@ hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	uint16_t port, size;
 	uint32_t idx;
 	struct acrn_vm *vm = vcpu->vm;
-	struct pio_request *pio_req = &io_req->reqs.pio;
+	struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
 	struct vm_io_handler_desc *handler;
 	io_read_fn_t io_read = NULL;
 	io_write_fn_t io_write = NULL;
@@ -427,11 +428,11 @@ hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 			break;
 		}
 
-	if ((pio_req->direction == REQUEST_WRITE) && (io_write != NULL)) {
+	if ((pio_req->direction == ACRN_IOREQ_DIR_WRITE) && (io_write != NULL)) {
 		if (io_write(vcpu, port, size, pio_req->value)) {
 			status = 0;
 		}
-	} else if ((pio_req->direction == REQUEST_READ) && (io_read != NULL)) {
+	} else if ((pio_req->direction == ACRN_IOREQ_DIR_READ) && (io_read != NULL)) {
 		if (io_read(vcpu, port, size)) {
 			status = 0;
 		}
@@ -440,7 +441,7 @@ hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	}
 
 	pr_dbg("IO %s on port %04x, data %08x",
-		(pio_req->direction == REQUEST_READ) ? "read" : "write", port, pio_req->value);
+		(pio_req->direction == ACRN_IOREQ_DIR_READ) ? "read" : "write", port, pio_req->value);
 
 	return status;
 }
@@ -449,7 +450,7 @@ hv_emulate_pio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 * Use registered MMIO handlers on the given request if it falls in the range of
 * any of them.
 *
- * @pre io_req->io_type == REQ_MMIO
+ * @pre io_req->io_type == ACRN_IOREQ_TYPE_MMIO
 *
 * @retval 0 Successfully emulated by registered handlers.
 * @retval -ENODEV No proper handler found.
@@ -462,7 +463,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	bool hold_lock = true;
 	uint16_t idx;
 	uint64_t address, size, base, end;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio_req = &io_req->reqs.mmio_request;
 	struct mem_io_node *mmio_handler = NULL;
 	hv_mem_io_handler_t read_write = NULL;
 	void *handler_private_data = NULL;
@@ -518,7 +519,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 * @brief Emulate \p io_req for \p vcpu
 *
 * Handle an I/O request by either invoking a hypervisor-internal handler or
- * deliver to VHM.
+ * deliver to HSM.
 *
 * @pre vcpu != NULL
 * @pre vcpu->vm != NULL
@@ -528,7 +529,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 * @param io_req The I/O request holding the details of the MMIO access
 *
 * @retval 0 Successfully emulated by registered handlers.
- * @retval IOREQ_PENDING The I/O request is delivered to VHM.
+ * @retval ACRN_IOREQ_STATE_PENDING The I/O request is delivered to HSM.
 * @retval -EIO The request spans multiple devices and cannot be emulated.
 * @retval -EINVAL \p io_req has an invalid io_type.
 * @retval <0 on other errors during emulation.
@@ -542,14 +543,14 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	vm_config = get_vm_config(vcpu->vm->vm_id);
 	switch (io_req->io_type) {
-	case REQ_PORTIO:
+	case ACRN_IOREQ_TYPE_PORTIO:
 		status = hv_emulate_pio(vcpu, io_req);
 		if (status == 0) {
 			emulate_pio_complete(vcpu, io_req);
 		}
 		break;
-	case REQ_MMIO:
-	case REQ_WP:
+	case ACRN_IOREQ_TYPE_MMIO:
+	case ACRN_IOREQ_TYPE_WP:
 		status = hv_emulate_mmio(vcpu, io_req);
 		if (status == 0) {
 			emulate_mmio_complete(vcpu, io_req);
@@ -563,9 +564,9 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	if ((status == -ENODEV) && (vm_config->load_order == POST_LAUNCHED_VM)) {
 		/*
-		 * No handler from HV side, search from VHM in Dom0
+		 * No handler from HV side, search from HSM in Service VM
 		 *
-		 * ACRN insert request to VHM and inject upcall.
+		 * ACRN insert request to HSM and inject upcall.
 		 */
 		status = acrn_insert_request(vcpu, io_req);
 		if (status == 0) {
@@ -574,7 +575,7 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
 			/* here for both IO & MMIO, the direction, address,
 			 * size definition is same
 			 */
-			struct pio_request *pio_req = &io_req->reqs.pio;
+			struct acrn_pio_request *pio_req = &io_req->reqs.pio_request;
 
 			pr_fatal("%s Err: access dir %d, io_type %d, addr = 0x%lx, size=%lu", __func__,
 				pio_req->direction, io_req->io_type,

@@ -84,7 +84,7 @@ static uint32_t ioapic_pin_to_vpin(struct acrn_vm *vm, const struct acrn_vm_conf
 static int32_t vgpio_mmio_handler(struct io_request *io_req, void *data)
 {
-	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 	struct acrn_vm *vm = (struct acrn_vm *) data;
 	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
 	int32_t ret = 0;
@@ -99,7 +99,7 @@ static int32_t vgpio_mmio_handler(struct io_request *io_req, void *data)
 	/* all gpio registers have 4 bytes size */
 	if (mmio->size == 4U) {
-		if (mmio->direction == REQUEST_READ) {
+		if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 			padbar = mmio_read32((const void *)hpa2hva((hpa & ~P2SB_PCR_SPACE_MASK) + GPIO_PADBAR));
 			pad0 = padbar & P2SB_PCR_SPACE_MASK;
 			value = mmio_read32((const void *)hva);

@@ -556,7 +556,7 @@ get_vm_gsicount(const struct acrn_vm *vm)
 int32_t vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private_data)
 {
 	struct acrn_single_vioapic *vioapic = (struct acrn_single_vioapic *)handler_private_data;
-	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 	uint64_t gpa = mmio->address;
 	int32_t ret = 0;
@@ -564,10 +564,10 @@ int32_t vioapic_mmio_access_handler(struct io_request *io_req, void *handler_pri
 	if (mmio->size == 4UL) {
 		uint32_t data = (uint32_t)mmio->value;
 
-		if (mmio->direction == REQUEST_READ) {
+		if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 			vioapic_mmio_rw(vioapic, gpa, &data, true);
 			mmio->value = (uint64_t)data;
-		} else if (mmio->direction == REQUEST_WRITE) {
+		} else if (mmio->direction == ACRN_IOREQ_DIR_WRITE) {
 			vioapic_mmio_rw(vioapic, gpa, &data, false);
 		} else {
 			ret = -EINVAL;

@@ -188,7 +188,7 @@ static void create_ivshmem_device(struct pci_vdev *vdev)
 static int32_t ivshmem_mmio_handler(struct io_request *io_req, void *data)
 {
 	union ivshmem_doorbell doorbell;
-	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 	struct pci_vdev *vdev = (struct pci_vdev *) data;
 	struct ivshmem_device *ivs_dev = (struct ivshmem_device *) vdev->priv_data;
 	uint64_t offset = mmio->address - vdev->vbars[IVSHMEM_MMIO_BAR].base_gpa;
@@ -200,7 +200,7 @@ static int32_t ivshmem_mmio_handler(struct io_request *io_req, void *data)
 	 * IVSHMEM_IV_POS_REG is Read-Only register and IVSHMEM_DOORBELL_REG
 	 * is Write-Only register, they are used for interrupt.
 	 */
-	if (mmio->direction == REQUEST_READ) {
+	if (mmio->direction == ACRN_IOREQ_DIR_READ) {
 		if (offset != IVSHMEM_DOORBELL_REG) {
 			mmio->value = ivs_dev->mmio.data[offset >> 2U];
 		} else {

@@ -129,7 +129,7 @@ static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
  */
 static int32_t pt_vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data)
 {
-	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
 	struct pci_vdev *vdev;
 	uint32_t index;
 	int32_t ret = 0;
@@ -138,7 +138,7 @@ static int32_t pt_vmsix_handle_table_mmio_access(struct io_request *io_req, void
 	if (vdev->user == vdev) {
 		index = rw_vmsix_table(vdev, io_req);
 
-		if ((mmio->direction == REQUEST_WRITE) && (index < vdev->msix.table_count)) {
+		if ((mmio->direction == ACRN_IOREQ_DIR_WRITE) && (index < vdev->msix.table_count)) {
 			if (vdev->msix.is_vmsix_on_msi) {
 				remap_one_vmsix_entry_on_msi(vdev, index);
 			} else {

View File

@ -43,7 +43,7 @@ static int32_t read_vmcs9900_cfg(const struct pci_vdev *vdev,
static int32_t vmcs9900_mmio_handler(struct io_request *io_req, void *data) static int32_t vmcs9900_mmio_handler(struct io_request *io_req, void *data)
{ {
struct mmio_request *mmio = &io_req->reqs.mmio; struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
struct pci_vdev *vdev = (struct pci_vdev *)data; struct pci_vdev *vdev = (struct pci_vdev *)data;
struct acrn_vuart *vu = vdev->priv_data; struct acrn_vuart *vu = vdev->priv_data;
struct pci_vbar *vbar = &vdev->vbars[MCS9900_MMIO_BAR]; struct pci_vbar *vbar = &vdev->vbars[MCS9900_MMIO_BAR];
@ -51,7 +51,7 @@ static int32_t vmcs9900_mmio_handler(struct io_request *io_req, void *data)
offset = mmio->address - vbar->base_gpa; offset = mmio->address - vbar->base_gpa;
if (mmio->direction == REQUEST_READ) { if (mmio->direction == ACRN_IOREQ_DIR_READ) {
mmio->value = vuart_read_reg(vu, offset); mmio->value = vuart_read_reg(vu, offset);
} else { } else {
vuart_write_reg(vu, offset, (uint8_t) mmio->value); vuart_write_reg(vu, offset, (uint8_t) mmio->value);

View File

@ -67,7 +67,7 @@ bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes,
*/ */
uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req) uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
{ {
struct mmio_request *mmio = &io_req->reqs.mmio; struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
struct msix_table_entry *entry; struct msix_table_entry *entry;
uint32_t entry_offset, table_offset, index = CONFIG_MAX_MSIX_TABLE_NUM; uint32_t entry_offset, table_offset, index = CONFIG_MAX_MSIX_TABLE_NUM;
uint64_t offset; uint64_t offset;
@ -83,14 +83,14 @@ uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
entry = &vdev->msix.table_entries[index]; entry = &vdev->msix.table_entries[index];
entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE; entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;
if (mmio->direction == REQUEST_READ) { if (mmio->direction == ACRN_IOREQ_DIR_READ) {
(void)memcpy_s(&mmio->value, (size_t)mmio->size, (void)memcpy_s(&mmio->value, (size_t)mmio->size,
(void *)entry + entry_offset, (size_t)mmio->size); (void *)entry + entry_offset, (size_t)mmio->size);
} else { } else {
(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size, (void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
&mmio->value, (size_t)mmio->size); &mmio->value, (size_t)mmio->size);
} }
} else if (mmio->direction == REQUEST_READ) { } else if (mmio->direction == ACRN_IOREQ_DIR_READ) {
mmio->value = 0UL; mmio->value = 0UL;
} }
} else { } else {

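Editor's note: the index/offset split in rw_vmsix_table() follows from the fixed MSI-X entry layout. A worked illustration, assuming MSIX_TABLE_ENTRY_SIZE is 16 bytes (the value defined by the PCI spec) and an arbitrary example offset:

	uint64_t table_offset = 0x34UL;                          /* offset into the table */
	uint32_t index = (uint32_t)(table_offset / 16U);         /* entry 3 */
	uint32_t entry_offset = (uint32_t)(table_offset % 16U);  /* byte 4: message upper address */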
View File

@ -55,7 +55,7 @@ static bool vpci_pio_cfgaddr_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t
uint32_t val = ~0U; uint32_t val = ~0U;
struct acrn_vpci *vpci = &vcpu->vm->vpci; struct acrn_vpci *vpci = &vcpu->vm->vpci;
union pci_cfg_addr_reg *cfg_addr = &vpci->addr; union pci_cfg_addr_reg *cfg_addr = &vpci->addr;
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
if ((addr == (uint16_t)PCI_CONFIG_ADDR) && (bytes == 4U)) { if ((addr == (uint16_t)PCI_CONFIG_ADDR) && (bytes == 4U)) {
val = cfg_addr->value; val = cfg_addr->value;
@ -121,7 +121,7 @@ static bool vpci_pio_cfgdata_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t
union pci_bdf bdf; union pci_bdf bdf;
uint16_t offset = addr - PCI_CONFIG_DATA; uint16_t offset = addr - PCI_CONFIG_DATA;
uint32_t val = ~0U; uint32_t val = ~0U;
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
cfg_addr.value = atomic_readandclear32(&vpci->addr.value); cfg_addr.value = atomic_readandclear32(&vpci->addr.value);
if (cfg_addr.bits.enable != 0U) { if (cfg_addr.bits.enable != 0U) {
@ -174,7 +174,7 @@ static bool vpci_pio_cfgdata_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
static int32_t vpci_mmio_cfg_access(struct io_request *io_req, void *private_data) static int32_t vpci_mmio_cfg_access(struct io_request *io_req, void *private_data)
{ {
int32_t ret = 0; int32_t ret = 0;
struct mmio_request *mmio = &io_req->reqs.mmio; struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
struct acrn_vpci *vpci = (struct acrn_vpci *)private_data; struct acrn_vpci *vpci = (struct acrn_vpci *)private_data;
uint64_t pci_mmcofg_base = vpci->pci_mmcfg.address; uint64_t pci_mmcofg_base = vpci->pci_mmcfg.address;
uint64_t address = mmio->address; uint64_t address = mmio->address;
@ -192,7 +192,7 @@ static int32_t vpci_mmio_cfg_access(struct io_request *io_req, void *private_dat
*/ */
bdf.value = (uint16_t)((address - pci_mmcofg_base) >> 12U); bdf.value = (uint16_t)((address - pci_mmcofg_base) >> 12U);
if (mmio->direction == REQUEST_READ) { if (mmio->direction == ACRN_IOREQ_DIR_READ) {
ret = vpci_read_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t *)&mmio->value); ret = vpci_read_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t *)&mmio->value);
} else { } else {
ret = vpci_write_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t)mmio->value); ret = vpci_write_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t)mmio->value);

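Editor's note: the bdf computation above relies on the ECAM layout, where every bus/device/function owns a 4 KiB page of configuration space, so the BDF is the page index and the low 12 bits select the register. A sketch with assumed values:

	uint64_t base = 0xE0000000UL;                         /* assumed MMCONFIG base */
	uint64_t address = base + (0x00A8UL << 12U) + 0x10UL; /* BDF 0x00A8, register 0x10 */
	uint16_t bdf = (uint16_t)((address - base) >> 12U);   /* 0x00A8 -> bus 0, dev 0x15, fn 0 */
	uint32_t reg = (uint32_t)(address & 0xFFFUL);         /* 0x10: BAR0 */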
View File

@ -813,7 +813,7 @@ static int32_t vpic_primary_handler(struct acrn_vpic *vpic, bool in, uint16_t po
*/ */
static bool vpic_primary_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width) static bool vpic_primary_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
{ {
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
if (vpic_primary_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) { if (vpic_primary_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) {
pr_err("Primary vPIC read port 0x%x width=%d failed\n", pr_err("Primary vPIC read port 0x%x width=%d failed\n",
@ -865,7 +865,7 @@ static int32_t vpic_secondary_handler(struct acrn_vpic *vpic, bool in, uint16_t
*/ */
static bool vpic_secondary_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width) static bool vpic_secondary_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
{ {
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
if (vpic_secondary_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) { if (vpic_secondary_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) {
pr_err("Secondary vPIC read port 0x%x width=%d failed\n", pr_err("Secondary vPIC read port 0x%x width=%d failed\n",
@ -943,7 +943,7 @@ static int32_t vpic_elc_handler(struct acrn_vpic *vpic, bool in, uint16_t port,
*/ */
static bool vpic_elc_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width) static bool vpic_elc_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
{ {
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
if (vpic_elc_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) { if (vpic_elc_handler(vm_pic(vcpu->vm), true, addr, width, &pio_req->value) < 0) {
pr_err("pic elc read port 0x%x width=%d failed", addr, width); pr_err("pic elc read port 0x%x width=%d failed", addr, width);

View File

@ -51,7 +51,7 @@ static uint8_t cmos_get_reg_val(uint8_t addr)
static bool vrtc_read(struct acrn_vcpu *vcpu, uint16_t addr, __unused size_t width) static bool vrtc_read(struct acrn_vcpu *vcpu, uint16_t addr, __unused size_t width)
{ {
uint8_t offset; uint8_t offset;
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
struct acrn_vm *vm = vcpu->vm; struct acrn_vm *vm = vcpu->vm;
offset = vm->vrtc_offset; offset = vm->vrtc_offset;

View File

@ -537,7 +537,7 @@ static bool vuart_read(struct acrn_vcpu *vcpu, uint16_t offset_arg, __unused siz
{ {
uint16_t offset = offset_arg; uint16_t offset = offset_arg;
struct acrn_vuart *vu = find_vuart_by_port(vcpu->vm, offset); struct acrn_vuart *vu = find_vuart_by_port(vcpu->vm, offset);
struct pio_request *pio_req = &vcpu->req.reqs.pio; struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;
if (vu != NULL) { if (vu != NULL) {
offset -= vu->port_base; offset -= vu->port_base;

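Editor's note: port I/O handlers follow the same renaming as the vrtc and vuart cases above; a read is completed by storing the result into vcpu->req.reqs.pio_request.value. A minimal sketch of the pattern (my_port_read is a hypothetical device callback):

	static bool my_pio_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
	{
		struct acrn_pio_request *pio_req = &vcpu->req.reqs.pio_request;

		/* The handler fills in the value; the framework copies it to the guest. */
		pio_req->value = my_port_read(vcpu->vm, addr, width);
		return true;
	}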
View File

@ -52,7 +52,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu);
* @pre io_req->io_type == REQ_PORTIO * @pre io_req->io_type == REQ_PORTIO
* *
* @remark This function must be called when \p io_req is completed, after * @remark This function must be called when \p io_req is completed, after
* either a previous call to emulate_io() returning 0 or the corresponding VHM * either a previous call to emulate_io() returning 0 or the corresponding HSM
* request having transferred to the COMPLETE state. * request having transferred to the COMPLETE state.
*/ */
void emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req); void emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req);
@ -82,10 +82,10 @@ void allow_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_
void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes); void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address, uint32_t nbytes);
/** /**
* @brief Fire VHM interrupt to SOS * @brief Fire HSM interrupt to SOS
* *
* @return None * @return None
*/ */
void arch_fire_vhm_interrupt(void); void arch_fire_hsm_interrupt(void);
#endif /* IO_EMUL_H */ #endif /* IO_EMUL_H */

View File

@ -40,7 +40,7 @@
*/ */
#define NR_STATIC_MAPPINGS (NR_STATIC_MAPPINGS_1 + CONFIG_MAX_VM_NUM) #define NR_STATIC_MAPPINGS (NR_STATIC_MAPPINGS_1 + CONFIG_MAX_VM_NUM)
#define HYPERVISOR_CALLBACK_VHM_VECTOR 0xF3U #define HYPERVISOR_CALLBACK_HSM_VECTOR 0xF3U
/* vectors range for dynamic allocation, usually for devices */ /* vectors range for dynamic allocation, usually for devices */
#define VECTOR_DYNAMIC_START 0x20U #define VECTOR_DYNAMIC_START 0x20U

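Editor's note: one plausible use of the renamed vector plumbing, not taken from this diff: the notification vector defaults to the static define above and can be overridden through the renamed setter.

	set_hsm_notification_vector(HYPERVISOR_CALLBACK_HSM_VECTOR); /* default 0xF3 */
	uint32_t vec = get_hsm_notification_vector();                /* reads back 0xF3 */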
View File

@ -210,8 +210,7 @@ int32_t hcall_inject_msi(struct acrn_vcpu *vcpu, struct acrn_vm *target_vm, uint
* @param vcpu Pointer to vCPU that initiates the hypercall * @param vcpu Pointer to vCPU that initiates the hypercall
* @param target_vm Pointer to target VM data structure * @param target_vm Pointer to target VM data structure
* @param param1 not used * @param param1 not used
* @param param2 guest physical address. This gpa points to * @param param2 guest physical address. This gpa points to the I/O request buffer
* struct acrn_set_ioreq_buffer
* *
* @pre is_sos_vm(vcpu->vm) * @pre is_sos_vm(vcpu->vm)
* @return 0 on success, non-zero on error. * @return 0 on success, non-zero on error.

View File

@ -25,14 +25,19 @@ struct io_request {
/** /**
* @brief Type of the request (PIO, MMIO, etc). * @brief Type of the request (PIO, MMIO, etc).
* *
* Refer to vhm_request for detailed description of I/O request types. * Refer to acrn_io_request for a detailed description of I/O request types.
*/ */
uint32_t io_type; uint32_t io_type;
/** /**
* @brief Details of this request in the same format as vhm_request. * @brief Details of this request in the same format as acrn_io_request.
*/ */
union vhm_io_request reqs; union {
struct acrn_pio_request pio_request;
struct acrn_pci_request pci_request;
struct acrn_mmio_request mmio_request;
uint64_t data[8];
} reqs;
}; };
/** /**
@ -105,7 +110,6 @@ struct vm_io_handler_desc {
}; };
/* Typedef for MMIO handler and range check routine */ /* Typedef for MMIO handler and range check routine */
struct mmio_request;
typedef int32_t (*hv_mem_io_handler_t)(struct io_request *io_req, void *handler_private_data); typedef int32_t (*hv_mem_io_handler_t)(struct io_request *io_req, void *handler_private_data);
/** /**
@ -187,45 +191,45 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
void reset_vm_ioreqs(struct acrn_vm *vm); void reset_vm_ioreqs(struct acrn_vm *vm);
/** /**
* @brief Get the state of VHM request * @brief Get the state of an IO request
* *
* @param vm Target VM context * @param vm Target VM context
* @param vhm_req_id VHM Request ID * @param vcpu_id VCPU ID of the IO request
* *
* @return State of the IO Request. * @return State of the IO Request.
*/ */
uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id); uint32_t get_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id);
/** /**
* @brief Set the state of VHM request * @brief Set the state of an IO request
* *
* @param vm Target VM context * @param vm Target VM context
* @param vhm_req_id VHM Request ID * @param vcpu_id VCPU ID of the IO request
* @param state State to be set * @param state State to be set
* @return None * @return None
*/ */
void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state); void set_io_req_state(struct acrn_vm *vm, uint16_t vcpu_id, uint32_t state);
/** /**
* @brief Set the vector for HV callback VHM * @brief Set the vector for HV callback HSM
* *
* @param vector vector for HV callback VHM * @param vector vector for HV callback HSM
* @return None * @return None
*/ */
void set_vhm_notification_vector(uint32_t vector); void set_hsm_notification_vector(uint32_t vector);
/** /**
* @brief Get the vector for HV callback VHM * @brief Get the vector for HV callback HSM
* *
* @return vector for HV callback VHM * @return vector for HV callback HSM
*/ */
uint32_t get_vhm_notification_vector(void); uint32_t get_hsm_notification_vector(void);
/** /**
* @brief Emulate \p io_req for \p vcpu * @brief Emulate \p io_req for \p vcpu
* *
* Handle an I/O request by either invoking a hypervisor-internal handler or * Handle an I/O request by either invoking a hypervisor-internal handler or
* deliver to VHM. * delivering it to HSM.
* *
* @pre vcpu != NULL * @pre vcpu != NULL
* @pre vcpu->vm != NULL * @pre vcpu->vm != NULL
@ -235,7 +239,7 @@ uint32_t get_vhm_notification_vector(void);
* @param io_req The I/O request holding the details of the MMIO access * @param io_req The I/O request holding the details of the MMIO access
* *
* @retval 0 Successfully emulated by registered handlers. * @retval 0 Successfully emulated by registered handlers.
* @retval IOREQ_PENDING The I/O request is delivered to VHM. * @retval IOREQ_PENDING The I/O request is delivered to HSM.
* @retval -EIO The request spans multiple devices and cannot be emulated. * @retval -EIO The request spans multiple devices and cannot be emulated.
* @retval -EINVAL \p io_req has an invalid type. * @retval -EINVAL \p io_req has an invalid type.
* @retval <0 on other errors during emulation. * @retval <0 on other errors during emulation.

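Editor's note: to make the renamed union concrete, here is a hedged sketch of how an in-hypervisor caller might fill a port I/O request before handing it to emulate_io(), whose return values are documented above; the port and value are illustrative, and vcpu is assumed to be the current vCPU:

	struct io_request io_req;

	io_req.io_type = ACRN_IOREQ_TYPE_PORTIO;
	io_req.reqs.pio_request.direction = ACRN_IOREQ_DIR_WRITE;
	io_req.reqs.pio_request.address = 0x3F8UL;  /* COM1, illustrative */
	io_req.reqs.pio_request.size = 1UL;
	io_req.reqs.pio_request.value = 0x41UL;

	/* 0: handled by an internal handler; IOREQ_PENDING: forwarded to HSM. */
	int32_t ret = emulate_io(vcpu, &io_req);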
View File

@ -18,26 +18,29 @@
#include <types.h> #include <types.h>
/* /*
* Common structures for ACRN/VHM/DM * Common structures for ACRN/HSM/DM
*/ */
/* /*
* IO request * IO request
*/ */
#define VHM_REQUEST_MAX 16U
#define REQ_STATE_FREE 3U #define ACRN_IO_REQUEST_MAX 16U
#define REQ_STATE_PENDING 0U
#define REQ_STATE_COMPLETE 1U #define ACRN_IOREQ_STATE_PENDING 0U
#define REQ_STATE_PROCESSING 2U #define ACRN_IOREQ_STATE_COMPLETE 1U
#define ACRN_IOREQ_STATE_PROCESSING 2U
#define ACRN_IOREQ_STATE_FREE 3U
#define ACRN_IOREQ_TYPE_PORTIO 0U
#define ACRN_IOREQ_TYPE_MMIO 1U
#define ACRN_IOREQ_TYPE_PCICFG 2U
#define ACRN_IOREQ_TYPE_WP 3U
#define ACRN_IOREQ_DIR_READ 0U
#define ACRN_IOREQ_DIR_WRITE 1U
#define REQ_PORTIO 0U
#define REQ_MMIO 1U
#define REQ_PCICFG 2U
#define REQ_WP 3U
#define REQUEST_READ 0U
#define REQUEST_WRITE 1U
/* IOAPIC device model info */ /* IOAPIC device model info */
#define VIOAPIC_RTE_NUM 48U /* vioapic pins */ #define VIOAPIC_RTE_NUM 48U /* vioapic pins */
@ -72,11 +75,11 @@
/** /**
* @brief Representation of a MMIO request * @brief Representation of a MMIO request
*/ */
struct mmio_request { struct acrn_mmio_request {
/** /**
* @brief Direction of the access * @brief Direction of the access
* *
* Either \p REQUEST_READ or \p REQUEST_WRITE. * Either \p ACRN_IOREQ_DIR_READ or \p ACRN_IOREQ_DIR_WRITE.
*/ */
uint32_t direction; uint32_t direction;
@ -104,11 +107,11 @@ struct mmio_request {
/** /**
* @brief Representation of a port I/O request * @brief Representation of a port I/O request
*/ */
struct pio_request { struct acrn_pio_request {
/** /**
* @brief Direction of the access * @brief Direction of the access
* *
* Either \p REQUEST_READ or \p REQUEST_WRITE. * Either \p ACRN_IOREQ_DIR_READ or \p ACRN_IOREQ_DIR_WRITE.
*/ */
uint32_t direction; uint32_t direction;
@ -136,11 +139,11 @@ struct pio_request {
/** /**
* @brief Representation of a PCI configuration space access * @brief Representation of a PCI configuration space access
*/ */
struct pci_request { struct acrn_pci_request {
/** /**
* @brief Direction of the access * @brief Direction of the access
* *
* Either \p REQUEST_READ or \p REQUEST_WRITE. * Either \p ACRN_IOREQ_DIR_READ or \p ACRN_IOREQ_DIR_WRITE.
*/ */
uint32_t direction; uint32_t direction;
@ -180,28 +183,21 @@ struct pci_request {
int32_t reg; int32_t reg;
} __aligned(8); } __aligned(8);
union vhm_io_request {
struct pio_request pio;
struct pci_request pci;
struct mmio_request mmio;
int64_t reserved1[8];
};
/** /**
* @brief 256-byte VHM requests * @brief 256-byte I/O requests
* *
* The state transitions of a VHM request are: * The state transitions of an I/O request are:
* *
* FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ... * FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
* *
* When a request is in COMPLETE or FREE state, the request is owned by the * When a request is in COMPLETE or FREE state, the request is owned by the
* hypervisor. SOS (VHM or DM) shall not read or write the internals of the * hypervisor. SOS (HSM or DM) shall not read or write the internals of the
* request except the state. * request except the state.
* *
* When a request is in PENDING or PROCESSING state, the request is owned by * When a request is in PENDING or PROCESSING state, the request is owned by
* SOS. The hypervisor shall not read or write the request other than the state. * SOS. The hypervisor shall not read or write the request other than the state.
* *
* Based on the rules above, a typical VHM request lifecycle should looks like * Based on the rules above, a typical I/O request lifecycle should look like
* the following. * the following.
* *
* @verbatim embed:rst:leading-asterisk * @verbatim embed:rst:leading-asterisk
@ -270,9 +266,9 @@ union vhm_io_request {
* the hypervisor, as the hypervisor shall not access the request any more. * the hypervisor, as the hypervisor shall not access the request any more.
* *
* 2. Due to similar reasons, setting state to COMPLETE is the last operation * 2. Due to similar reasons, setting state to COMPLETE is the last operation
* of request handling in VHM or clients in SOS. * of request handling in HSM or clients in SOS.
*/ */
struct vhm_request { struct acrn_io_request {
/** /**
* @brief Type of this request. * @brief Type of this request.
* *
@ -297,13 +293,14 @@ struct vhm_request {
/** /**
* @brief Details about this request. * @brief Details about this request.
* *
* For REQ_PORTIO, this has type
* pio_request. For REQ_MMIO and REQ_WP, this has type mmio_request. For
* REQ_PCICFG, this has type pci_request.
*
* Byte offset: 64. * Byte offset: 64.
*/ */
union vhm_io_request reqs; union {
struct acrn_pio_request pio_request;
struct acrn_pci_request pci_request;
struct acrn_mmio_request mmio_request;
uint64_t data[8];
} reqs;
/** /**
* @brief Reserved. * @brief Reserved.
@ -313,27 +310,27 @@ struct vhm_request {
uint32_t reserved1; uint32_t reserved1;
/** /**
* @brief The client which is distributed to handle this request. * @brief Whether this request has been handled by the HSM driver.
*
* Accessed by VHM only.
* *
* Byte offset: 132. * Byte offset: 132.
*/ */
int32_t client; int32_t kernel_handled;
/** /**
* @brief The status of this request. * @brief The status of this request.
* *
* Taking REQ_STATE_xxx as values. * Taking ACRN_IOREQ_STATE_xxx as values.
* *
* Byte offset: 136. * Byte offset: 136.
*/ */
uint32_t processed; uint32_t processed;
} __aligned(256); } __aligned(256);
union vhm_request_buffer { struct acrn_io_request_buffer {
struct vhm_request req_queue[VHM_REQUEST_MAX]; union {
int8_t reserved[4096]; struct acrn_io_request req_slot[ACRN_IO_REQUEST_MAX];
int8_t reserved[4096];
};
} __aligned(4096); } __aligned(4096);
/** /**

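Editor's note: a hedged sketch of the lifecycle documented above, as a SOS-side client might observe it; handle_one_request and notify_request_finish are hypothetical names, and the atomics reflect the stated ownership rules rather than code from this diff:

	/* io_req_buf is assumed to be the shared, page-aligned buffer mapped
	 * from the hypervisor; vcpu_id selects the per-vCPU slot. */
	struct acrn_io_request *req = &io_req_buf->req_slot[vcpu_id];

	if (__atomic_load_n(&req->processed, __ATOMIC_ACQUIRE) ==
			ACRN_IOREQ_STATE_PROCESSING) {
		handle_one_request(req);  /* hypothetical dispatch on req->io_type */

		/* Setting COMPLETE must be the last write: ownership returns to
		 * the hypervisor the moment the state changes. */
		__atomic_store_n(&req->processed, ACRN_IOREQ_STATE_COMPLETE,
				__ATOMIC_RELEASE);
		notify_request_finish(vcpu_id); /* e.g. via ACRN_IOCTL_NOTIFY_REQUEST_FINISH */
	}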
View File

@ -63,4 +63,4 @@ CTASSERT(CPU_CONTEXT_OFFSET_LDTR - CPU_CONTEXT_OFFSET_EXTCTX_START
CTASSERT((sizeof(struct trusty_startup_param) CTASSERT((sizeof(struct trusty_startup_param)
+ sizeof(struct trusty_key_info)) < 0x1000U); + sizeof(struct trusty_key_info)) < 0x1000U);
CTASSERT(NR_WORLD == 2); CTASSERT(NR_WORLD == 2);
CTASSERT(sizeof(struct vhm_request) == (4096U/VHM_REQUEST_MAX)); CTASSERT(sizeof(struct acrn_io_request) == (4096U/ACRN_IO_REQUEST_MAX));
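Editor's note: the assertion pins the layout arithmetic: 16 slots (ACRN_IO_REQUEST_MAX) of 4096/16 = 256 bytes each fill exactly the 4 KiB page that acrn_io_request_buffer reserves. An equivalent standalone check, in C11 spelling:

	_Static_assert(sizeof(struct acrn_io_request) * ACRN_IO_REQUEST_MAX == 4096U,
		"16 request slots of 256 bytes each must fill one 4 KiB page");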